Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/Kconfig19
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h1
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h1
-rw-r--r--drivers/gpu/drm/drm_cache.c28
-rw-r--r--drivers/gpu/drm/drm_crtc.c139
-rw-r--r--drivers/gpu/drm/drm_drv.c22
-rw-r--r--drivers/gpu/drm/drm_edid.c208
-rw-r--r--drivers/gpu/drm/drm_edid_load.c29
-rw-r--r--drivers/gpu/drm/drm_edid_modes.h42
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c406
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c10
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c251
-rw-r--r--drivers/gpu/drm/drm_irq.c2
-rw-r--r--drivers/gpu/drm/drm_vm.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c13
-rw-r--r--drivers/gpu/drm/gma500/Makefile5
-rw-r--r--drivers/gpu/drm/gma500/backlight.c45
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c72
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c236
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c1950
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c6
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c12
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c7
-rw-r--r--drivers/gpu/drm/gma500/gem.c9
-rw-r--r--drivers/gpu/drm/gma500/gem_glue.c90
-rw-r--r--drivers/gpu/drm/gma500/gem_glue.h2
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c101
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h46
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c13
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c8
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c1
-rw-r--r--drivers/gpu/drm/gma500/opregion.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h20
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h28
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c13
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_reg.h197
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c4
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c16
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/dvo.h16
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7017.c21
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c17
-rw-r--r--drivers/gpu/drm/i915/dvo_ivch.c23
-rw-r--r--drivers/gpu/drm/i915/dvo_ns2501.c588
-rw-r--r--drivers/gpu/drm/i915/dvo_sil164.c20
-rw-r--r--drivers/gpu/drm/i915/dvo_tfp410.c18
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c251
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c73
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c61
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h239
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c1514
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c65
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c174
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c44
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c391
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c144
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c16
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c194
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h328
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c220
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h25
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c165
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c144
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2129
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c401
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h147
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c115
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c221
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c99
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c1
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c23
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c60
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c420
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c152
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h20
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c210
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c70
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c1
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig36
-rw-r--r--drivers/gpu/drm/nouveau/Makefile225
-rw-r--r--drivers/gpu/drm/nouveau/core/core/client.c103
-rw-r--r--drivers/gpu/drm/nouveau/core/core/engctx.c236
-rw-r--r--drivers/gpu/drm/nouveau/core/core/engine.c55
-rw-r--r--drivers/gpu/drm/nouveau/core/core/enum.c (renamed from drivers/gpu/drm/nouveau/nouveau_util.c)47
-rw-r--r--drivers/gpu/drm/nouveau/core/core/gpuobj.c318
-rw-r--r--drivers/gpu/drm/nouveau/core/core/handle.c223
-rw-r--r--drivers/gpu/drm/nouveau/core/core/mm.c (renamed from drivers/gpu/drm/nouveau/nouveau_mm.c)174
-rw-r--r--drivers/gpu/drm/nouveau/core/core/namedb.c203
-rw-r--r--drivers/gpu/drm/nouveau/core/core/object.c468
-rw-r--r--drivers/gpu/drm/nouveau/core/core/option.c131
-rw-r--r--drivers/gpu/drm/nouveau/core/core/parent.c139
-rw-r--r--drivers/gpu/drm/nouveau/core/core/printk.c74
-rw-r--r--drivers/gpu/drm/nouveau/core/core/ramht.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/core/subdev.c115
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c175
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc (renamed from drivers/gpu/drm/nouveau/nva3_copy.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h (renamed from drivers/gpu/drm/nouveau/nva3_copy.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h (renamed from drivers/gpu/drm/nouveau/nvc0_copy.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nva3.c222
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c265
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nve0.c156
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc (renamed from drivers/gpu/drm/nouveau/nv98_crypt.fuc)2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h (renamed from drivers/gpu/drm/nouveau/nv98_crypt.fuc.h)4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c217
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c208
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c90
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c125
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c118
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/vga.c215
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c87
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c185
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c173
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c99
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c181
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c630
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h178
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c171
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c208
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c349
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c502
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c420
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c647
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c628
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctx.h (renamed from drivers/gpu/drm/nouveau/nouveau_grctx.h)26
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c (renamed from drivers/gpu/drm/nouveau/nv40_grctx.c)133
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c (renamed from drivers/gpu/drm/nouveau/nv50_grctx.c)561
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c3039
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c2788
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc (renamed from drivers/gpu/drm/nouveau/nvc0_grgpc.fuc)8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h (renamed from drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h)66
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc451
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h530
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc (renamed from drivers/gpu/drm/nouveau/nvc0_grhub.fuc)8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h (renamed from drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h)89
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc780
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h857
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc (renamed from drivers/gpu/drm/nouveau/nvc0_graph.fuc)0
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc400
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv04.c1387
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c1314
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c381
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv25.c167
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c134
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv30.c238
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv34.c168
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv35.c166
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c495
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.h21
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c888
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.h7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c955
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h171
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve0.c576
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/regs.h269
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c308
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c144
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c240
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c104
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c175
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv04.c147
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv10.c129
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c199
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c181
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nv84.c175
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h118
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/client.h42
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/debug.h13
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h136
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/engctx.h51
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/engine.h57
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/enum.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/gpuobj.h71
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/handle.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/math.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/mm.h33
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/namedb.h56
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h188
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/option.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/parent.h64
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/printk.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/ramht.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/subdev.h118
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/bsp.h45
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/copy.h49
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/crypt.h46
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h44
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h57
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h111
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/graph.h72
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/mpeg.h61
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/ppp.h45
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/software.h60
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/vp.h45
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bar.h55
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h13
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h90
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h33
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h25
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h21
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h14
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h77
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h46
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h59
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/device.h24
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/devinit.h40
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h134
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h64
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h60
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ibus.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/instmem.h73
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h33
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h49
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mxm.h37
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/therm.h58
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/timer.h53
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/vga.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/vm.h (renamed from drivers/gpu/drm/nouveau/nouveau_vm.h)87
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h47
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c263
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c215
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c479
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/bit.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/conn.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dp.c76
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c100
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c121
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c129
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c2120
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/perf.c75
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/pll.c417
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/therm.c177
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c359
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c (renamed from drivers/gpu/drm/nouveau/nouveau_ramht.h)56
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c105
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c95
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c94
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/pll.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c242
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c (renamed from drivers/gpu/drm/nouveau/nv50_calc.c)69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/base.c472
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv04.c86
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv10.c195
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv20.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv30.c147
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv40.c375
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv50.c410
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c285
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nve0.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/base.c (renamed from drivers/gpu/drm/nouveau/nv98_ppp.c)69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h98
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c189
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c159
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c124
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c (renamed from drivers/gpu/drm/nouveau/nouveau_i2c.h)65
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c87
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/base.c130
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c130
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c120
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c136
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c148
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c178
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c498
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c245
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c271
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c169
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c194
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c104
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c212
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c407
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c230
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c123
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c123
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/base.c135
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c198
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c138
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c172
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c93
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c49
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c74
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c80
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c73
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c75
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/base.c290
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c193
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h22
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c233
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/base.c144
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fan.c234
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/ic.c116
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c163
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c157
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/priv.h73
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c81
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/base.c87
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c249
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/base.c (renamed from drivers/gpu/drm/nouveau/nouveau_vm.c)163
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c151
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c158
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c248
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c (renamed from drivers/gpu/drm/nouveau/nv50_vm.c)118
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c (renamed from drivers/gpu/drm/nouveau/nvc0_vm.c)123
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c426
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.c152
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c94
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c4567
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h178
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c439
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h99
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_calc.c235
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c400
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c396
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c219
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c196
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c259
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h94
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c55
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h51
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c280
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c693
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h144
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c512
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1655
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c226
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fifo.h32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c177
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpio.c400
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpio.h71
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpuobj.c807
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hdmi.c41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c435
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.h182
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c394
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioctl.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c132
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c742
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.h67
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mxm.c723
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c162
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c65
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c462
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.h186
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.c309
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c377
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_software.h56
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c1304
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c331
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c354
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.h25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_util.h49
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c99
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_volt.c53
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c142
-rw-r--r--drivers/gpu/drm/nouveau/nv04_cursor.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c148
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c132
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c129
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.h184
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fb.c54
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c70
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c67
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c505
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c1325
-rw-r--r--drivers/gpu/drm/nouveau/nv04_instmem.c192
-rw-r--r--drivers/gpu/drm/nouveau/nv04_mc.c23
-rw-r--r--drivers/gpu/drm/nouveau/nv04_pm.c37
-rw-r--r--drivers/gpu/drm/nouveau/nv04_software.c147
-rw-r--r--drivers/gpu/drm/nouveau/nv04_timer.c83
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c36
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fb.c103
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c103
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fifo.c137
-rw-r--r--drivers/gpu/drm/nouveau/nv10_gpio.c123
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c1188
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fifo.c176
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c98
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv_modes.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv20_fb.c147
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c835
-rw-r--r--drivers/gpu/drm/nouveau/nv30_fb.c115
-rw-r--r--drivers/gpu/drm/nouveau/nv31_mpeg.c346
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c162
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c209
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c466
-rw-r--r--drivers/gpu/drm/nouveau/nv40_mc.c27
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c182
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c118
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c32
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c89
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c551
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h31
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c268
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c295
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c36
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c127
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c293
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c155
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c867
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c427
-rw-r--r--drivers/gpu/drm/nouveau/nv50_mc.c39
-rw-r--r--drivers/gpu/drm/nouveau/nv50_mpeg.c241
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c247
-rw-r--r--drivers/gpu/drm/nouveau/nv50_software.c203
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c133
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c237
-rw-r--r--drivers/gpu/drm/nouveau/nv84_bsp.c83
-rw-r--r--drivers/gpu/drm/nouveau/nv84_crypt.c205
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c127
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fifo.c249
-rw-r--r--drivers/gpu/drm/nouveau/nv84_vp.c83
-rw-r--r--drivers/gpu/drm/nouveau/nv98_crypt.c216
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.c203
-rw-r--r--drivers/gpu/drm/nouveau/nva3_pm.c274
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_copy.c243
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fb.c134
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c31
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c150
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c477
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c897
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.h97
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c2878
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c223
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_pm.c178
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_software.c153
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c160
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c504
-rw-r--r--drivers/gpu/drm/nouveau/nve0_fifo.c453
-rw-r--r--drivers/gpu/drm/nouveau/nve0_graph.c831
-rw-r--r--drivers/gpu/drm/nouveau/nve0_graph.h89
-rw-r--r--drivers/gpu/drm/nouveau/nve0_grctx.c2777
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c663
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c367
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c282
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c61
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h7
-rw-r--r--drivers/gpu/drm/radeon/ni.c134
-rw-r--r--drivers/gpu/drm/radeon/nid.h1
-rw-r--r--drivers/gpu/drm/radeon/r100.c96
-rw-r--r--drivers/gpu/drm/radeon/r300.c4
-rw-r--r--drivers/gpu/drm/radeon/r520.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c37
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c115
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c52
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.h1
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c5
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h192
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c607
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.h445
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c93
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h28
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c411
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c43
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c83
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c602
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c54
-rw-r--r--drivers/gpu/drm/radeon/radeon_ioc32.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c43
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c65
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h49
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c104
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/rs400.c6
-rw-r--r--drivers/gpu/drm/radeon/rs600.c49
-rw-r--r--drivers/gpu/drm/radeon/rs690.c6
-rw-r--r--drivers/gpu/drm/radeon/rv515.c18
-rw-r--r--drivers/gpu/drm/radeon/rv770.c10
-rw-r--r--drivers/gpu/drm/radeon/si.c113
-rw-r--r--drivers/gpu/drm/radeon/sid.h15
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c2
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig10
-rw-r--r--drivers/gpu/drm/shmobile/Makefile7
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_backlight.c90
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_backlight.h23
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c763
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.h60
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c361
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.h47
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.c160
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.h34
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.c268
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.h22
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_regs.h311
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c16
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c3
-rw-r--r--drivers/gpu/drm/udl/udl_encoder.c2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c26
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c7
-rw-r--r--drivers/gpu/drm/udl/udl_main.c7
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c2
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c1
548 files changed, 68568 insertions(+), 42650 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 90e28081712d..18321b68b880 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -22,7 +22,7 @@ menuconfig DRM
 config DRM_USB
 	tristate
 	depends on DRM
-	depends on USB_ARCH_HAS_HCD
+	depends on USB_SUPPORT && USB_ARCH_HAS_HCD
 	select USB
 
 config DRM_KMS_HELPER
@@ -54,6 +54,21 @@ config DRM_TTM
 	  GPU memory types. Will be enabled automatically if a device driver
 	  uses it.
 
+config DRM_GEM_CMA_HELPER
+	bool
+	depends on DRM
+	help
+	  Choose this if you need the GEM CMA helper functions
+
+config DRM_KMS_CMA_HELPER
+	bool
+	select DRM_GEM_CMA_HELPER
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	help
+	  Choose this if you need the KMS CMA helper functions
+
 config DRM_TDFX
 	tristate "3dfx Banshee/Voodoo3+"
 	depends on DRM && PCI
@@ -193,3 +208,5 @@ source "drivers/gpu/drm/ast/Kconfig"
 source "drivers/gpu/drm/mgag200/Kconfig"
 
 source "drivers/gpu/drm/cirrus/Kconfig"
+
+source "drivers/gpu/drm/shmobile/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index f65f65ed0ddf..2ff5cefe9ead 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -15,11 +15,13 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 		drm_trace_points.o drm_global.o drm_prime.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
+drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
 
 drm-usb-y := drm_usb.o
 
 drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
 
@@ -45,4 +47,5 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index aea439760b60..5ccf984f063a 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -94,7 +94,6 @@ struct ast_private {
 		struct drm_global_reference mem_global_ref;
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
-		atomic_t validate_sequence;
 	} ttm;
 
 	struct drm_gem_object *cursor_cache;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index a6982b86df9b..7fc9f7272b56 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -582,7 +582,6 @@ static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
 	.mode_set_base = ast_crtc_mode_set_base,
 	.disable = ast_crtc_disable,
 	.load_lut = ast_crtc_load_lut,
-	.disable = ast_crtc_disable,
 	.prepare = ast_crtc_prepare,
 	.commit = ast_crtc_commit,
 
@@ -737,6 +736,7 @@ static int ast_get_modes(struct drm_connector *connector)
 	if (edid) {
 		drm_mode_connector_update_edid_property(&ast_connector->base, edid);
 		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
 		return ret;
 	} else
 		drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
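
The ast_get_modes() hunk above plugs an EDID leak: drm_add_edid_modes() copies what it needs out of the EDID, so the buffer returned by drm_get_edid() is the caller's to free. A minimal sketch of the corrected pattern, not code from this series; the my_get_modes() name and the explicit adapter parameter are hypothetical:

/* Assumes the usual drmP.h/drm_crtc.h/linux/slab.h includes. */
static int my_get_modes(struct drm_connector *connector,
			struct i2c_adapter *adapter)
{
	struct edid *edid = drm_get_edid(connector, adapter);
	int ret;

	if (!edid) {
		drm_mode_connector_update_edid_property(connector, NULL);
		return 0;
	}

	drm_mode_connector_update_edid_property(connector, edid);
	ret = drm_add_edid_modes(connector, edid);
	kfree(edid);	/* the fix: the EDID block must not leak */
	return ret;
}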
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 7f0d71ffba3f..6e0cc724e5a2 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -143,7 +143,6 @@ struct cirrus_device {
 		struct drm_global_reference mem_global_ref;
 		struct ttm_bo_global_ref bo_global_ref;
 		struct ttm_bo_device bdev;
-		atomic_t validate_sequence;
 	} ttm;
 	bool mm_inited;
 };
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index ec4698246213..a575cb2e6bdb 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -37,12 +37,13 @@ drm_clflush_page(struct page *page)
 {
 	uint8_t *page_virtual;
 	unsigned int i;
+	const int size = boot_cpu_data.x86_clflush_size;
 
 	if (unlikely(page == NULL))
 		return;
 
 	page_virtual = kmap_atomic(page);
-	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+	for (i = 0; i < PAGE_SIZE; i += size)
 		clflush(page_virtual + i);
 	kunmap_atomic(page_virtual);
 }
@@ -100,6 +101,31 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 EXPORT_SYMBOL(drm_clflush_pages);
 
 void
+drm_clflush_sg(struct sg_table *st)
+{
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		struct scatterlist *sg;
+		int i;
+
+		mb();
+		for_each_sg(st->sgl, sg, st->nents, i)
+			drm_clflush_page(sg_page(sg));
+		mb();
+
+		return;
+	}
+
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_sg);
+
+void
 drm_clflush_virt_range(char *addr, unsigned long length)
 {
 #if defined(CONFIG_X86)
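
The new drm_clflush_sg() gives drivers a one-call CPU cache flush over a whole sg_table, mirroring the existing drm_clflush_pages(). A hedged usage sketch; the my_bo type and its pages field are hypothetical, and the table is assumed to have been populated elsewhere:

struct my_bo {				/* hypothetical buffer object */
	struct sg_table *pages;		/* assumed already populated */
};

/* Make CPU writes visible to the GPU before it reads the backing pages. */
static void my_bo_flush_for_gpu(struct my_bo *bo)
{
	drm_clflush_sg(bo->pages);
}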
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 271ffa4fdb47..ef1b22144d37 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -293,6 +293,8 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 {
 	int ret;
 
+	kref_init(&fb->refcount);
+
 	ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
 	if (ret)
 		return ret;
@@ -306,6 +308,38 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 }
 EXPORT_SYMBOL(drm_framebuffer_init);
 
+static void drm_framebuffer_free(struct kref *kref)
+{
+	struct drm_framebuffer *fb =
+			container_of(kref, struct drm_framebuffer, refcount);
+	fb->funcs->destroy(fb);
+}
+
+/**
+ * drm_framebuffer_unreference - unref a framebuffer
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ */
+void drm_framebuffer_unreference(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
+	DRM_DEBUG("FB ID: %d\n", fb->base.id);
+	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+	kref_put(&fb->refcount, drm_framebuffer_free);
+}
+EXPORT_SYMBOL(drm_framebuffer_unreference);
+
+/**
+ * drm_framebuffer_reference - incr the fb refcnt
+ */
+void drm_framebuffer_reference(struct drm_framebuffer *fb)
+{
+	DRM_DEBUG("FB ID: %d\n", fb->base.id);
+	kref_get(&fb->refcount);
+}
+EXPORT_SYMBOL(drm_framebuffer_reference);
+
 /**
  * drm_framebuffer_cleanup - remove a framebuffer object
  * @fb: framebuffer to remove
@@ -319,6 +353,32 @@ EXPORT_SYMBOL(drm_framebuffer_init);
 void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = fb->dev;
+	/*
+	 * This could be moved to drm_framebuffer_remove(), but for
+	 * debugging is nice to keep around the list of fb's that are
+	 * no longer associated w/ a drm_file but are not unreferenced
+	 * yet. (i915 and omapdrm have debugfs files which will show
+	 * this.)
+	 */
+	drm_mode_object_put(dev, &fb->base);
+	list_del(&fb->head);
+	dev->mode_config.num_fb--;
+}
+EXPORT_SYMBOL(drm_framebuffer_cleanup);
+
+/**
+ * drm_framebuffer_remove - remove and unreference a framebuffer object
+ * @fb: framebuffer to remove
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Scans all the CRTCs and planes in @dev's mode_config. If they're
+ * using @fb, removes it, setting it to NULL.
+ */
+void drm_framebuffer_remove(struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = fb->dev;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
 	struct drm_mode_set set;
@@ -349,11 +409,11 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
 		}
 	}
 
-	drm_mode_object_put(dev, &fb->base);
-	list_del(&fb->head);
-	dev->mode_config.num_fb--;
+	list_del(&fb->filp_head);
+
+	drm_framebuffer_unreference(fb);
 }
-EXPORT_SYMBOL(drm_framebuffer_cleanup);
+EXPORT_SYMBOL(drm_framebuffer_remove);
 
 /**
  * drm_crtc_init - Initialise a new CRTC object
@@ -376,6 +436,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 
 	crtc->dev = dev;
 	crtc->funcs = funcs;
+	crtc->invert_dimensions = false;
 
 	mutex_lock(&dev->mode_config.mutex);
 
@@ -1030,11 +1091,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 	}
 
 	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
-		fb->funcs->destroy(fb);
-	}
-
-	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
-		crtc->funcs->destroy(crtc);
+		drm_framebuffer_remove(fb);
 	}
 
 	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
@@ -1042,6 +1099,10 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 		plane->funcs->destroy(plane);
 	}
 
+	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+		crtc->funcs->destroy(crtc);
+	}
+
 	idr_remove_all(&dev->mode_config.crtc_idr);
 	idr_destroy(&dev->mode_config.crtc_idr);
 }
@@ -1851,6 +1912,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
 	if (crtc_req->mode_valid) {
+		int hdisplay, vdisplay;
 		/* If we have a mode we need a framebuffer. */
 		/* If we pass -1, set the mode with the currently bound fb */
 		if (crtc_req->fb_id == -1) {
@@ -1886,14 +1948,20 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 
 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
-		if (mode->hdisplay > fb->width ||
-		    mode->vdisplay > fb->height ||
-		    crtc_req->x > fb->width - mode->hdisplay ||
-		    crtc_req->y > fb->height - mode->vdisplay) {
-			DRM_DEBUG_KMS("Invalid CRTC viewport %ux%u+%u+%u for fb size %ux%u.\n",
-				      mode->hdisplay, mode->vdisplay,
-				      crtc_req->x, crtc_req->y,
-				      fb->width, fb->height);
+		hdisplay = mode->hdisplay;
+		vdisplay = mode->vdisplay;
+
+		if (crtc->invert_dimensions)
+			swap(hdisplay, vdisplay);
+
+		if (hdisplay > fb->width ||
+		    vdisplay > fb->height ||
+		    crtc_req->x > fb->width - hdisplay ||
+		    crtc_req->y > fb->height - vdisplay) {
+			DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+				      fb->width, fb->height,
+				      hdisplay, vdisplay, crtc_req->x, crtc_req->y,
+				      crtc->invert_dimensions ? " (inverted)" : "");
 			ret = -ENOSPC;
 			goto out;
 		}
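
Both viewport checks touched in this file (here and in the page-flip ioctl below) now share the same shape. Factored out as a standalone sketch; my_check_viewport() is a hypothetical name, the logic is exactly the hunk above:

static int my_check_viewport(const struct drm_crtc *crtc,
			     const struct drm_display_mode *mode,
			     const struct drm_framebuffer *fb,
			     int x, int y)
{
	int hdisplay = mode->hdisplay;
	int vdisplay = mode->vdisplay;

	/* A CRTC scanning out rotated by 90/270 degrees swaps its axes. */
	if (crtc->invert_dimensions)
		swap(hdisplay, vdisplay);

	if (hdisplay > fb->width || vdisplay > fb->height ||
	    x > fb->width - hdisplay || y > fb->height - vdisplay)
		return -ENOSPC;	/* viewport falls outside the fb */

	return 0;
}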
@@ -2168,6 +2236,8 @@ static int format_check(const struct drm_mode_fb_cmd2 *r)
 	case DRM_FORMAT_NV21:
 	case DRM_FORMAT_NV16:
 	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
 	case DRM_FORMAT_YUV410:
 	case DRM_FORMAT_YVU410:
 	case DRM_FORMAT_YUV411:
@@ -2334,11 +2404,7 @@ int drm_mode_rmfb(struct drm_device *dev,
 		goto out;
 	}
 
-	/* TODO release all crtc connected to the framebuffer */
-	/* TODO unhock the destructor from the buffer object */
-
-	list_del(&fb->filp_head);
-	fb->funcs->destroy(fb);
+	drm_framebuffer_remove(fb);
 
 out:
 	mutex_unlock(&dev->mode_config.mutex);
@@ -2488,8 +2554,7 @@ void drm_fb_release(struct drm_file *priv)
 
 	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
-		list_del(&fb->filp_head);
-		fb->funcs->destroy(fb);
+		drm_framebuffer_remove(fb);
 	}
 	mutex_unlock(&dev->mode_config.mutex);
 }
@@ -3488,6 +3553,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	struct drm_framebuffer *fb;
 	struct drm_pending_vblank_event *e = NULL;
 	unsigned long flags;
+	int hdisplay, vdisplay;
 	int ret = -EINVAL;
 
 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -3517,14 +3583,19 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 		goto out;
 	fb = obj_to_fb(obj);
 
-	if (crtc->mode.hdisplay > fb->width ||
-	    crtc->mode.vdisplay > fb->height ||
-	    crtc->x > fb->width - crtc->mode.hdisplay ||
-	    crtc->y > fb->height - crtc->mode.vdisplay) {
-		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d.\n",
-			      fb->width, fb->height,
-			      crtc->mode.hdisplay, crtc->mode.vdisplay,
-			      crtc->x, crtc->y);
+	hdisplay = crtc->mode.hdisplay;
+	vdisplay = crtc->mode.vdisplay;
+
+	if (crtc->invert_dimensions)
+		swap(hdisplay, vdisplay);
+
+	if (hdisplay > fb->width ||
+	    vdisplay > fb->height ||
+	    crtc->x > fb->width - hdisplay ||
+	    crtc->y > fb->height - vdisplay) {
+		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+			      fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
+			      crtc->invert_dimensions ? " (inverted)" : "");
 		ret = -ENOSPC;
 		goto out;
 	}
@@ -3717,6 +3788,8 @@ int drm_format_num_planes(uint32_t format)
 	case DRM_FORMAT_NV21:
 	case DRM_FORMAT_NV16:
 	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
 		return 2;
 	default:
 		return 1;
@@ -3750,6 +3823,8 @@ int drm_format_plane_cpp(uint32_t format, int plane)
 	case DRM_FORMAT_NV21:
 	case DRM_FORMAT_NV16:
 	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_NV24:
+	case DRM_FORMAT_NV42:
 		return plane ? 2 : 1;
 	case DRM_FORMAT_YUV410:
 	case DRM_FORMAT_YVU410:
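
With framebuffers now reference counted, a driver that needs an fb to outlive the ioctl that supplied it (an asynchronous page flip, say) pins it with drm_framebuffer_reference() and drops it with drm_framebuffer_unreference(). The sketch below is an assumption about typical usage, not code from this series; my_flip_work and the two helpers are hypothetical. Note that, per the WARN_ON added above, the unreference must happen under the mode_config mutex:

struct my_flip_work {			/* hypothetical completion record */
	struct drm_framebuffer *fb;
};

static void my_flip_queue(struct my_flip_work *work,
			  struct drm_framebuffer *fb)
{
	drm_framebuffer_reference(fb);	/* pin until the flip completes */
	work->fb = fb;
}

static void my_flip_complete(struct drm_device *dev,
			     struct my_flip_work *work)
{
	mutex_lock(&dev->mode_config.mutex);	/* unreference asserts this */
	drm_framebuffer_unreference(work->fb);
	mutex_unlock(&dev->mode_config.mutex);
	work->fb = NULL;
}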
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index c8fdf03f32c2..be174cab105a 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -140,10 +140,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
@@ -152,19 +152,19 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
 };
 
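The two drm_drv.c hunks above drop DRM_MASTER from the read-only and buffer-management mode ioctls, so a client that is not the current DRM master can now, for example, create and destroy its own framebuffers. A hedged userspace sketch using libdrm (the fd, bo_handle and geometry are assumed to exist already; error handling trimmed):

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Round-trip an fb from a non-master client; before this change
 * ADDFB/RMFB were rejected for anyone but the master. */
static int my_fb_roundtrip(int fd, uint32_t bo_handle,
			   uint32_t w, uint32_t h, uint32_t pitch)
{
	uint32_t fb_id;
	int ret;

	ret = drmModeAddFB(fd, w, h, 24, 32, pitch, bo_handle, &fb_id);
	if (ret)
		return ret;

	return drmModeRmFB(fd, fb_id);
}
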
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a2e54769344a..5dda07cf7097 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -161,7 +161,7 @@ MODULE_PARM_DESC(edid_fixup,
  * Sanity check the EDID block (base or extension). Return 0 if the block
  * doesn't check out, or 1 if it's valid.
  */
-bool drm_edid_block_valid(u8 *raw_edid, int block)
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
 {
 	int i;
 	u8 csum = 0;
@@ -184,7 +184,9 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
 	for (i = 0; i < EDID_LENGTH; i++)
 		csum += raw_edid[i];
 	if (csum) {
-		DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+		if (print_bad_edid) {
+			DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
+		}
 
 		/* allow CEA to slide through, switches mangle this */
 		if (raw_edid[0] != 0x02)
@@ -210,7 +212,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
 	return 1;
 
 bad:
-	if (raw_edid) {
+	if (raw_edid && print_bad_edid) {
 		printk(KERN_ERR "Raw EDID:\n");
 		print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
 			       raw_edid, EDID_LENGTH, false);
@@ -234,7 +236,7 @@ bool drm_edid_is_valid(struct edid *edid)
 		return false;
 
 	for (i = 0; i <= edid->extensions; i++)
-		if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i))
+		if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
 			return false;
 
 	return true;
@@ -257,6 +259,8 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
257 int block, int len) 259 int block, int len)
258{ 260{
259 unsigned char start = block * EDID_LENGTH; 261 unsigned char start = block * EDID_LENGTH;
262 unsigned char segment = block >> 1;
263 unsigned char xfers = segment ? 3 : 2;
260 int ret, retries = 5; 264 int ret, retries = 5;
261 265
262 /* The core i2c driver will automatically retry the transfer if the 266 /* The core i2c driver will automatically retry the transfer if the
@@ -268,6 +272,11 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
268 do { 272 do {
269 struct i2c_msg msgs[] = { 273 struct i2c_msg msgs[] = {
270 { 274 {
275 .addr = DDC_SEGMENT_ADDR,
276 .flags = 0,
277 .len = 1,
278 .buf = &segment,
279 }, {
271 .addr = DDC_ADDR, 280 .addr = DDC_ADDR,
272 .flags = 0, 281 .flags = 0,
273 .len = 1, 282 .len = 1,
@@ -279,15 +288,21 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
279 .buf = buf, 288 .buf = buf,
280 } 289 }
281 }; 290 };
282 ret = i2c_transfer(adapter, msgs, 2); 291
292 /*
 293 * Avoid sending the segment address when it is zero, so as not to
 294 * upset non-compliant DDC monitors.
295 */
296 ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
297
283 if (ret == -ENXIO) { 298 if (ret == -ENXIO) {
284 DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n", 299 DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
285 adapter->name); 300 adapter->name);
286 break; 301 break;
287 } 302 }
288 } while (ret != 2 && --retries); 303 } while (ret != xfers && --retries);
289 304
290 return ret == 2 ? 0 : -1; 305 return ret == xfers ? 0 : -1;
291} 306}
292 307
293static bool drm_edid_is_zero(u8 *in_edid, int length) 308static bool drm_edid_is_zero(u8 *in_edid, int length)
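
The rewritten probe implements the E-DDC sequence: an optional segment-pointer write at address 0x30 (DDC_SEGMENT_ADDR), the offset write at 0x50 (DDC_ADDR), then the data read. Blocks 0 and 1 live in segment 0, so the segment write is skipped there to keep non-compliant monitors happy. A minimal standalone sketch of the same message layout (assuming only the i2c_msg semantics used above):

	/* Sketch: E-DDC message list for reading one EDID block. */
	static int ddc_read_block_sketch(struct i2c_adapter *adapter,
					 unsigned char *buf, int block, int len)
	{
		unsigned char start = block * EDID_LENGTH;
		unsigned char segment = block >> 1;	/* two 128-byte blocks per segment */
		unsigned char xfers = segment ? 3 : 2;	/* drop msgs[0] for segment 0 */
		struct i2c_msg msgs[] = {
			{ .addr = DDC_SEGMENT_ADDR, .flags = 0, .len = 1, .buf = &segment },
			{ .addr = DDC_ADDR, .flags = 0, .len = 1, .buf = &start },
			{ .addr = DDC_ADDR, .flags = I2C_M_RD, .len = len, .buf = buf },
		};

		/* &msgs[3 - xfers] starts at the offset write when segment == 0 */
		return i2c_transfer(adapter, &msgs[3 - xfers], xfers) == xfers ? 0 : -1;
	}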
@@ -306,6 +321,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
306{ 321{
307 int i, j = 0, valid_extensions = 0; 322 int i, j = 0, valid_extensions = 0;
308 u8 *block, *new; 323 u8 *block, *new;
324 bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
309 325
310 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) 326 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
311 return NULL; 327 return NULL;
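
The print_bad_edid flag encodes a simple log-suppression policy: warn in full the first time a connector hands back a bad EDID, then stay quiet unless KMS debugging is enabled, so a flaky monitor cannot flood the log on every probe. The predicate on its own (fields as in the patch):

	/* Sketch: the suppression policy behind print_bad_edid. */
	static bool should_print_bad_edid(const struct drm_connector *connector)
	{
		return !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
	}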
@@ -314,7 +330,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
314 for (i = 0; i < 4; i++) { 330 for (i = 0; i < 4; i++) {
315 if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH)) 331 if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
316 goto out; 332 goto out;
317 if (drm_edid_block_valid(block, 0)) 333 if (drm_edid_block_valid(block, 0, print_bad_edid))
318 break; 334 break;
319 if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) { 335 if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
320 connector->null_edid_counter++; 336 connector->null_edid_counter++;
@@ -339,7 +355,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
339 block + (valid_extensions + 1) * EDID_LENGTH, 355 block + (valid_extensions + 1) * EDID_LENGTH,
340 j, EDID_LENGTH)) 356 j, EDID_LENGTH))
341 goto out; 357 goto out;
342 if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j)) { 358 if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
343 valid_extensions++; 359 valid_extensions++;
344 break; 360 break;
345 } 361 }
@@ -362,8 +378,11 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
362 return block; 378 return block;
363 379
364carp: 380carp:
365 dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n", 381 if (print_bad_edid) {
366 drm_get_connector_name(connector), j); 382 dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
383 drm_get_connector_name(connector), j);
384 }
385 connector->bad_edid_counter++;
367 386
368out: 387out:
369 kfree(block); 388 kfree(block);
@@ -402,10 +421,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
402 if (drm_probe_ddc(adapter)) 421 if (drm_probe_ddc(adapter))
403 edid = (struct edid *)drm_do_get_edid(connector, adapter); 422 edid = (struct edid *)drm_do_get_edid(connector, adapter);
404 423
405 connector->display_info.raw_edid = (char *)edid;
406
407 return edid; 424 return edid;
408
409} 425}
410EXPORT_SYMBOL(drm_get_edid); 426EXPORT_SYMBOL(drm_get_edid);
411 427
@@ -1523,16 +1539,57 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
1523} 1539}
1524 1540
1525static int 1541static int
1542cea_db_payload_len(const u8 *db)
1543{
1544 return db[0] & 0x1f;
1545}
1546
1547static int
1548cea_db_tag(const u8 *db)
1549{
1550 return db[0] >> 5;
1551}
1552
1553static int
1554cea_revision(const u8 *cea)
1555{
1556 return cea[1];
1557}
1558
1559static int
1560cea_db_offsets(const u8 *cea, int *start, int *end)
1561{
1562 /* Data block offset in CEA extension block */
1563 *start = 4;
1564 *end = cea[2];
1565 if (*end == 0)
1566 *end = 127;
1567 if (*end < 4 || *end > 127)
1568 return -ERANGE;
1569 return 0;
1570}
1571
1572#define for_each_cea_db(cea, i, start, end) \
1573 for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
1574
1575static int
1526add_cea_modes(struct drm_connector *connector, struct edid *edid) 1576add_cea_modes(struct drm_connector *connector, struct edid *edid)
1527{ 1577{
1528 u8 * cea = drm_find_cea_extension(edid); 1578 u8 * cea = drm_find_cea_extension(edid);
1529 u8 * db, dbl; 1579 u8 * db, dbl;
1530 int modes = 0; 1580 int modes = 0;
1531 1581
1532 if (cea && cea[1] >= 3) { 1582 if (cea && cea_revision(cea) >= 3) {
1533 for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) { 1583 int i, start, end;
1534 dbl = db[0] & 0x1f; 1584
1535 if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK) 1585 if (cea_db_offsets(cea, &start, &end))
1586 return 0;
1587
1588 for_each_cea_db(cea, i, start, end) {
1589 db = &cea[i];
1590 dbl = cea_db_payload_len(db);
1591
1592 if (cea_db_tag(db) == VIDEO_BLOCK)
1536 modes += do_cea_modes (connector, db+1, dbl); 1593 modes += do_cea_modes (connector, db+1, dbl);
1537 } 1594 }
1538 } 1595 }
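
The new helpers centralize CEA-861 data block walking: each block opens with a header byte carrying the tag in the top three bits and the payload length in the low five, and for_each_cea_db() refuses to step onto a block whose declared payload would run past the end offset. A minimal sketch of a walker built on them (helper definitions as above):

	/* Sketch: count the CEA data blocks carrying a given tag. */
	static int count_cea_blocks(const u8 *cea, int tag)
	{
		int i, start, end, count = 0;

		if (cea_revision(cea) < 3)
			return 0;
		if (cea_db_offsets(cea, &start, &end))
			return 0;

		for_each_cea_db(cea, i, start, end)
			if (cea_db_tag(&cea[i]) == tag)
				count++;

		return count;
	}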
@@ -1541,19 +1598,28 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
1541} 1598}
1542 1599
1543static void 1600static void
1544parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db) 1601parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
1545{ 1602{
1546 connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */ 1603 u8 len = cea_db_payload_len(db);
1547
1548 connector->dvi_dual = db[6] & 1;
1549 connector->max_tmds_clock = db[7] * 5;
1550 1604
1551 connector->latency_present[0] = db[8] >> 7; 1605 if (len >= 6) {
1552 connector->latency_present[1] = (db[8] >> 6) & 1; 1606 connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
1553 connector->video_latency[0] = db[9]; 1607 connector->dvi_dual = db[6] & 1;
1554 connector->audio_latency[0] = db[10]; 1608 }
1555 connector->video_latency[1] = db[11]; 1609 if (len >= 7)
1556 connector->audio_latency[1] = db[12]; 1610 connector->max_tmds_clock = db[7] * 5;
1611 if (len >= 8) {
1612 connector->latency_present[0] = db[8] >> 7;
1613 connector->latency_present[1] = (db[8] >> 6) & 1;
1614 }
1615 if (len >= 9)
1616 connector->video_latency[0] = db[9];
1617 if (len >= 10)
1618 connector->audio_latency[0] = db[10];
1619 if (len >= 11)
1620 connector->video_latency[1] = db[11];
1621 if (len >= 12)
1622 connector->audio_latency[1] = db[12];
1557 1623
1558 DRM_LOG_KMS("HDMI: DVI dual %d, " 1624 DRM_LOG_KMS("HDMI: DVI dual %d, "
1559 "max TMDS clock %d, " 1625 "max TMDS clock %d, "
@@ -1577,6 +1643,21 @@ monitor_name(struct detailed_timing *t, void *data)
1577 *(u8 **)data = t->data.other_data.data.str.str; 1643 *(u8 **)data = t->data.other_data.data.str.str;
1578} 1644}
1579 1645
1646static bool cea_db_is_hdmi_vsdb(const u8 *db)
1647{
1648 int hdmi_id;
1649
1650 if (cea_db_tag(db) != VENDOR_BLOCK)
1651 return false;
1652
1653 if (cea_db_payload_len(db) < 5)
1654 return false;
1655
1656 hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
1657
1658 return hdmi_id == HDMI_IDENTIFIER;
1659}
1660
1580/** 1661/**
1581 * drm_edid_to_eld - build ELD from EDID 1662 * drm_edid_to_eld - build ELD from EDID
1582 * @connector: connector corresponding to the HDMI/DP sink 1663 * @connector: connector corresponding to the HDMI/DP sink
@@ -1623,29 +1704,40 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
1623 eld[18] = edid->prod_code[0]; 1704 eld[18] = edid->prod_code[0];
1624 eld[19] = edid->prod_code[1]; 1705 eld[19] = edid->prod_code[1];
1625 1706
1626 if (cea[1] >= 3) 1707 if (cea_revision(cea) >= 3) {
1627 for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) { 1708 int i, start, end;
1628 dbl = db[0] & 0x1f; 1709
1629 1710 if (cea_db_offsets(cea, &start, &end)) {
1630 switch ((db[0] & 0xe0) >> 5) { 1711 start = 0;
1712 end = 0;
1713 }
1714
1715 for_each_cea_db(cea, i, start, end) {
1716 db = &cea[i];
1717 dbl = cea_db_payload_len(db);
1718
1719 switch (cea_db_tag(db)) {
1631 case AUDIO_BLOCK: 1720 case AUDIO_BLOCK:
1632 /* Audio Data Block, contains SADs */ 1721 /* Audio Data Block, contains SADs */
1633 sad_count = dbl / 3; 1722 sad_count = dbl / 3;
1634 memcpy(eld + 20 + mnl, &db[1], dbl); 1723 if (dbl >= 1)
1724 memcpy(eld + 20 + mnl, &db[1], dbl);
1635 break; 1725 break;
1636 case SPEAKER_BLOCK: 1726 case SPEAKER_BLOCK:
1637 /* Speaker Allocation Data Block */ 1727 /* Speaker Allocation Data Block */
1638 eld[7] = db[1]; 1728 if (dbl >= 1)
1729 eld[7] = db[1];
1639 break; 1730 break;
1640 case VENDOR_BLOCK: 1731 case VENDOR_BLOCK:
1641 /* HDMI Vendor-Specific Data Block */ 1732 /* HDMI Vendor-Specific Data Block */
1642 if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0) 1733 if (cea_db_is_hdmi_vsdb(db))
1643 parse_hdmi_vsdb(connector, db); 1734 parse_hdmi_vsdb(connector, db);
1644 break; 1735 break;
1645 default: 1736 default:
1646 break; 1737 break;
1647 } 1738 }
1648 } 1739 }
1740 }
1649 eld[5] |= sad_count << 4; 1741 eld[5] |= sad_count << 4;
1650 eld[2] = (20 + mnl + sad_count * 3 + 3) / 4; 1742 eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
1651 1743
@@ -1723,38 +1815,26 @@ EXPORT_SYMBOL(drm_select_eld);
1723bool drm_detect_hdmi_monitor(struct edid *edid) 1815bool drm_detect_hdmi_monitor(struct edid *edid)
1724{ 1816{
1725 u8 *edid_ext; 1817 u8 *edid_ext;
1726 int i, hdmi_id; 1818 int i;
1727 int start_offset, end_offset; 1819 int start_offset, end_offset;
1728 bool is_hdmi = false;
1729 1820
1730 edid_ext = drm_find_cea_extension(edid); 1821 edid_ext = drm_find_cea_extension(edid);
1731 if (!edid_ext) 1822 if (!edid_ext)
1732 goto end; 1823 return false;
1733 1824
1734 /* Data block offset in CEA extension block */ 1825 if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
1735 start_offset = 4; 1826 return false;
1736 end_offset = edid_ext[2];
1737 1827
1738 /* 1828 /*
1739 * Because HDMI identifier is in Vendor Specific Block, 1829 * Because HDMI identifier is in Vendor Specific Block,
1740 * search it from all data blocks of CEA extension. 1830 * search it from all data blocks of CEA extension.
1741 */ 1831 */
1742 for (i = start_offset; i < end_offset; 1832 for_each_cea_db(edid_ext, i, start_offset, end_offset) {
1743 /* Increased by data block len */ 1833 if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
1744 i += ((edid_ext[i] & 0x1f) + 1)) { 1834 return true;
1745 /* Find vendor specific block */
1746 if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
1747 hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
1748 edid_ext[i + 3] << 16;
1749 /* Find HDMI identifier */
1750 if (hdmi_id == HDMI_IDENTIFIER)
1751 is_hdmi = true;
1752 break;
1753 }
1754 } 1835 }
1755 1836
1756end: 1837 return false;
1757 return is_hdmi;
1758} 1838}
1759EXPORT_SYMBOL(drm_detect_hdmi_monitor); 1839EXPORT_SYMBOL(drm_detect_hdmi_monitor);
1760 1840
@@ -1786,15 +1866,13 @@ bool drm_detect_monitor_audio(struct edid *edid)
1786 goto end; 1866 goto end;
1787 } 1867 }
1788 1868
1789 /* Data block offset in CEA extension block */ 1869 if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
1790 start_offset = 4; 1870 goto end;
1791 end_offset = edid_ext[2];
1792 1871
1793 for (i = start_offset; i < end_offset; 1872 for_each_cea_db(edid_ext, i, start_offset, end_offset) {
1794 i += ((edid_ext[i] & 0x1f) + 1)) { 1873 if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
1795 if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
1796 has_audio = true; 1874 has_audio = true;
1797 for (j = 1; j < (edid_ext[i] & 0x1f); j += 3) 1875 for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
1798 DRM_DEBUG_KMS("CEA audio format %d\n", 1876 DRM_DEBUG_KMS("CEA audio format %d\n",
1799 (edid_ext[i + j] >> 3) & 0xf); 1877 (edid_ext[i + j] >> 3) & 0xf);
1800 goto end; 1878 goto end;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 9d53e6503f9a..38d3943f72de 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -114,8 +114,8 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
114 }, 114 },
115}; 115};
116 116
117static int edid_load(struct drm_connector *connector, char *name, 117static u8 *edid_load(struct drm_connector *connector, char *name,
118 char *connector_name) 118 char *connector_name)
119{ 119{
120 const struct firmware *fw; 120 const struct firmware *fw;
121 struct platform_device *pdev; 121 struct platform_device *pdev;
@@ -123,6 +123,7 @@ static int edid_load(struct drm_connector *connector, char *name,
123 int fwsize, expected; 123 int fwsize, expected;
124 int builtin = 0, err = 0; 124 int builtin = 0, err = 0;
125 int i, valid_extensions = 0; 125 int i, valid_extensions = 0;
126 bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
126 127
127 pdev = platform_device_register_simple(connector_name, -1, NULL, 0); 128 pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
128 if (IS_ERR(pdev)) { 129 if (IS_ERR(pdev)) {
@@ -173,7 +174,8 @@ static int edid_load(struct drm_connector *connector, char *name,
173 } 174 }
174 memcpy(edid, fwdata, fwsize); 175 memcpy(edid, fwdata, fwsize);
175 176
176 if (!drm_edid_block_valid(edid, 0)) { 177 if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
178 connector->bad_edid_counter++;
177 DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ", 179 DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
178 name); 180 name);
179 kfree(edid); 181 kfree(edid);
@@ -185,7 +187,7 @@ static int edid_load(struct drm_connector *connector, char *name,
185 if (i != valid_extensions + 1) 187 if (i != valid_extensions + 1)
186 memcpy(edid + (valid_extensions + 1) * EDID_LENGTH, 188 memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
187 edid + i * EDID_LENGTH, EDID_LENGTH); 189 edid + i * EDID_LENGTH, EDID_LENGTH);
188 if (drm_edid_block_valid(edid + i * EDID_LENGTH, i)) 190 if (drm_edid_block_valid(edid + i * EDID_LENGTH, i, print_bad_edid))
189 valid_extensions++; 191 valid_extensions++;
190 } 192 }
191 193
@@ -205,7 +207,6 @@ static int edid_load(struct drm_connector *connector, char *name,
205 edid = new_edid; 207 edid = new_edid;
206 } 208 }
207 209
208 connector->display_info.raw_edid = edid;
209 DRM_INFO("Got %s EDID base block and %d extension%s from " 210 DRM_INFO("Got %s EDID base block and %d extension%s from "
210 "\"%s\" for connector \"%s\"\n", builtin ? "built-in" : 211 "\"%s\" for connector \"%s\"\n", builtin ? "built-in" :
211 "external", valid_extensions, valid_extensions == 1 ? "" : "s", 212 "external", valid_extensions, valid_extensions == 1 ? "" : "s",
@@ -215,7 +216,10 @@ relfw_out:
215 release_firmware(fw); 216 release_firmware(fw);
216 217
217out: 218out:
218 return err; 219 if (err)
220 return ERR_PTR(err);
221
222 return edid;
219} 223}
220 224
221int drm_load_edid_firmware(struct drm_connector *connector) 225int drm_load_edid_firmware(struct drm_connector *connector)
@@ -223,6 +227,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
223 char *connector_name = drm_get_connector_name(connector); 227 char *connector_name = drm_get_connector_name(connector);
224 char *edidname = edid_firmware, *last, *colon; 228 char *edidname = edid_firmware, *last, *colon;
225 int ret; 229 int ret;
230 struct edid *edid;
226 231
227 if (*edidname == '\0') 232 if (*edidname == '\0')
228 return 0; 233 return 0;
@@ -240,13 +245,13 @@ int drm_load_edid_firmware(struct drm_connector *connector)
240 if (*last == '\n') 245 if (*last == '\n')
241 *last = '\0'; 246 *last = '\0';
242 247
243 ret = edid_load(connector, edidname, connector_name); 248 edid = (struct edid *) edid_load(connector, edidname, connector_name);
244 if (ret) 249 if (IS_ERR_OR_NULL(edid))
245 return 0; 250 return 0;
246 251
247 drm_mode_connector_update_edid_property(connector, 252 drm_mode_connector_update_edid_property(connector, edid);
248 (struct edid *) connector->display_info.raw_edid); 253 ret = drm_add_edid_modes(connector, edid);
254 kfree(edid);
249 255
250 return drm_add_edid_modes(connector, (struct edid *) 256 return ret;
251 connector->display_info.raw_edid);
252} 257}
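
edid_load() now reports failure in-band through ERR_PTR() instead of a side-channel int, and a successful return hands buffer ownership to the caller, who frees it once the modes are added. A standalone sketch of that encoding and the IS_ERR_OR_NULL() check guarding it (function names are illustrative; the err.h semantics are standard):

	/* Sketch: ERR_PTR round-trip as used by the new edid_load() return. */
	static u8 *try_load(int err)
	{
		if (err)
			return ERR_PTR(err);	/* encode errno in the pointer */
		return kmalloc(EDID_LENGTH, GFP_KERNEL);	/* may be NULL */
	}

	static int caller_sketch(void)
	{
		u8 *buf = try_load(-ENOENT);

		if (IS_ERR_OR_NULL(buf))	/* catches both failure encodings */
			return IS_ERR(buf) ? PTR_ERR(buf) : -ENOMEM;

		kfree(buf);			/* caller owns the buffer */
		return 0;
	}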
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index fbd354c1f1f4..5dbf7d2557b4 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -89,7 +89,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
89 976, 1088, 0, 480, 486, 494, 517, 0, 89 976, 1088, 0, 480, 486, 494, 517, 0,
90 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 90 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
91 /* 1024x768@43Hz, interlace */ 91 /* 1024x768@43Hz, interlace */
92 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032, 92 { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
93 1208, 1264, 0, 768, 768, 772, 817, 0, 93 1208, 1264, 0, 768, 768, 772, 817, 0,
94 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | 94 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
95 DRM_MODE_FLAG_INTERLACE) }, 95 DRM_MODE_FLAG_INTERLACE) },
@@ -395,7 +395,7 @@ static const struct drm_display_mode edid_est_modes[] = {
395 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 395 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
396 1184, 1344, 0, 768, 771, 777, 806, 0, 396 1184, 1344, 0, 768, 771, 777, 806, 0,
397 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */ 397 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
398 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032, 398 { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
399 1208, 1264, 0, 768, 768, 776, 817, 0, 399 1208, 1264, 0, 768, 768, 776, 817, 0,
400 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */ 400 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
401 { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864, 401 { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
@@ -506,17 +506,17 @@ static const struct drm_display_mode edid_cea_modes[] = {
506 1430, 1650, 0, 720, 725, 730, 750, 0, 506 1430, 1650, 0, 720, 725, 730, 750, 0,
507 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 507 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
508 /* 5 - 1920x1080i@60Hz */ 508 /* 5 - 1920x1080i@60Hz */
509 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, 509 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
510 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, 510 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
511 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | 511 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
512 DRM_MODE_FLAG_INTERLACE) }, 512 DRM_MODE_FLAG_INTERLACE) },
513 /* 6 - 1440x480i@60Hz */ 513 /* 6 - 1440x480i@60Hz */
514 { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 514 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
515 1602, 1716, 0, 480, 488, 494, 525, 0, 515 1602, 1716, 0, 480, 488, 494, 525, 0,
516 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 516 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
517 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 517 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
518 /* 7 - 1440x480i@60Hz */ 518 /* 7 - 1440x480i@60Hz */
519 { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, 519 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
520 1602, 1716, 0, 480, 488, 494, 525, 0, 520 1602, 1716, 0, 480, 488, 494, 525, 0,
521 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 521 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
522 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 522 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -531,12 +531,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
531 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 531 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
532 DRM_MODE_FLAG_DBLCLK) }, 532 DRM_MODE_FLAG_DBLCLK) },
533 /* 10 - 2880x480i@60Hz */ 533 /* 10 - 2880x480i@60Hz */
534 { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 534 { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
535 3204, 3432, 0, 480, 488, 494, 525, 0, 535 3204, 3432, 0, 480, 488, 494, 525, 0,
536 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 536 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
537 DRM_MODE_FLAG_INTERLACE) }, 537 DRM_MODE_FLAG_INTERLACE) },
538 /* 11 - 2880x480i@60Hz */ 538 /* 11 - 2880x480i@60Hz */
539 { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, 539 { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
540 3204, 3432, 0, 480, 488, 494, 525, 0, 540 3204, 3432, 0, 480, 488, 494, 525, 0,
541 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 541 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
542 DRM_MODE_FLAG_INTERLACE) }, 542 DRM_MODE_FLAG_INTERLACE) },
@@ -573,17 +573,17 @@ static const struct drm_display_mode edid_cea_modes[] = {
573 1760, 1980, 0, 720, 725, 730, 750, 0, 573 1760, 1980, 0, 720, 725, 730, 750, 0,
574 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 574 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
575 /* 20 - 1920x1080i@50Hz */ 575 /* 20 - 1920x1080i@50Hz */
576 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, 576 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
577 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, 577 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
578 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | 578 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
579 DRM_MODE_FLAG_INTERLACE) }, 579 DRM_MODE_FLAG_INTERLACE) },
580 /* 21 - 1440x576i@50Hz */ 580 /* 21 - 1440x576i@50Hz */
581 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 581 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
582 1590, 1728, 0, 576, 580, 586, 625, 0, 582 1590, 1728, 0, 576, 580, 586, 625, 0,
583 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 583 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
584 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 584 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
585 /* 22 - 1440x576i@50Hz */ 585 /* 22 - 1440x576i@50Hz */
586 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, 586 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
587 1590, 1728, 0, 576, 580, 586, 625, 0, 587 1590, 1728, 0, 576, 580, 586, 625, 0,
588 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 588 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
589 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 589 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -598,12 +598,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
598 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 598 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
599 DRM_MODE_FLAG_DBLCLK) }, 599 DRM_MODE_FLAG_DBLCLK) },
600 /* 25 - 2880x576i@50Hz */ 600 /* 25 - 2880x576i@50Hz */
601 { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 601 { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
602 3180, 3456, 0, 576, 580, 586, 625, 0, 602 3180, 3456, 0, 576, 580, 586, 625, 0,
603 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 603 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
604 DRM_MODE_FLAG_INTERLACE) }, 604 DRM_MODE_FLAG_INTERLACE) },
605 /* 26 - 2880x576i@50Hz */ 605 /* 26 - 2880x576i@50Hz */
606 { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, 606 { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
607 3180, 3456, 0, 576, 580, 586, 625, 0, 607 3180, 3456, 0, 576, 580, 586, 625, 0,
608 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 608 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
609 DRM_MODE_FLAG_INTERLACE) }, 609 DRM_MODE_FLAG_INTERLACE) },
@@ -656,12 +656,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
656 3184, 3456, 0, 576, 581, 586, 625, 0, 656 3184, 3456, 0, 576, 581, 586, 625, 0,
657 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 657 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
658 /* 39 - 1920x1080i@50Hz */ 658 /* 39 - 1920x1080i@50Hz */
659 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, 659 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
660 2120, 2304, 0, 1080, 1126, 1136, 1250, 0, 660 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
661 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC | 661 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
662 DRM_MODE_FLAG_INTERLACE) }, 662 DRM_MODE_FLAG_INTERLACE) },
663 /* 40 - 1920x1080i@100Hz */ 663 /* 40 - 1920x1080i@100Hz */
664 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, 664 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
665 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, 665 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
666 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | 666 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
667 DRM_MODE_FLAG_INTERLACE) }, 667 DRM_MODE_FLAG_INTERLACE) },
@@ -688,7 +688,7 @@ static const struct drm_display_mode edid_cea_modes[] = {
688 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 688 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
689 DRM_MODE_FLAG_DBLCLK) }, 689 DRM_MODE_FLAG_DBLCLK) },
690 /* 46 - 1920x1080i@120Hz */ 690 /* 46 - 1920x1080i@120Hz */
691 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, 691 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
692 2052, 2200, 0, 1080, 1084, 1094, 1125, 0, 692 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
693 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | 693 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
694 DRM_MODE_FLAG_INTERLACE) }, 694 DRM_MODE_FLAG_INTERLACE) },
@@ -705,12 +705,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
705 798, 858, 0, 480, 489, 495, 525, 0, 705 798, 858, 0, 480, 489, 495, 525, 0,
706 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 706 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
707 /* 50 - 1440x480i@120Hz */ 707 /* 50 - 1440x480i@120Hz */
708 { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 708 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
709 1602, 1716, 0, 480, 488, 494, 525, 0, 709 1602, 1716, 0, 480, 488, 494, 525, 0,
710 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 710 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
711 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 711 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
712 /* 51 - 1440x480i@120Hz */ 712 /* 51 - 1440x480i@120Hz */
713 { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, 713 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
714 1602, 1716, 0, 480, 488, 494, 525, 0, 714 1602, 1716, 0, 480, 488, 494, 525, 0,
715 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 715 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
716 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 716 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -723,12 +723,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
723 796, 864, 0, 576, 581, 586, 625, 0, 723 796, 864, 0, 576, 581, 586, 625, 0,
724 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 724 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
725 /* 54 - 1440x576i@200Hz */ 725 /* 54 - 1440x576i@200Hz */
726 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 726 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
727 1590, 1728, 0, 576, 580, 586, 625, 0, 727 1590, 1728, 0, 576, 580, 586, 625, 0,
728 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 728 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
729 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 729 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
730 /* 55 - 1440x576i@200Hz */ 730 /* 55 - 1440x576i@200Hz */
731 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, 731 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
732 1590, 1728, 0, 576, 580, 586, 625, 0, 732 1590, 1728, 0, 576, 580, 586, 625, 0,
733 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 733 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
734 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 734 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
@@ -741,12 +741,12 @@ static const struct drm_display_mode edid_cea_modes[] = {
741 798, 858, 0, 480, 489, 495, 525, 0, 741 798, 858, 0, 480, 489, 495, 525, 0,
742 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 742 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
743 /* 58 - 1440x480i@240 */ 743 /* 58 - 1440x480i@240 */
744 { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 744 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
745 1602, 1716, 0, 480, 488, 494, 525, 0, 745 1602, 1716, 0, 480, 488, 494, 525, 0,
746 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 746 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
747 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 747 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
748 /* 59 - 1440x480i@240 */ 748 /* 59 - 1440x480i@240 */
749 { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, 749 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
750 1602, 1716, 0, 480, 488, 494, 525, 0, 750 1602, 1716, 0, 480, 488, 494, 525, 0,
751 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | 751 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
752 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, 752 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
new file mode 100644
index 000000000000..09e11a5d921a
--- /dev/null
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -0,0 +1,406 @@
1/*
2 * drm kms/fb cma (contiguous memory allocator) helper functions
3 *
4 * Copyright (C) 2012 Analog Device Inc.
5 * Author: Lars-Peter Clausen <lars@metafoo.de>
6 *
7 * Based on udl_fbdev.c
8 * Copyright (C) 2012 Red Hat
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#include <drm/drmP.h>
21#include <drm/drm_crtc.h>
22#include <drm/drm_fb_helper.h>
23#include <drm/drm_crtc_helper.h>
24#include <drm/drm_gem_cma_helper.h>
25#include <drm/drm_fb_cma_helper.h>
26#include <linux/module.h>
27
28struct drm_fb_cma {
29 struct drm_framebuffer fb;
30 struct drm_gem_cma_object *obj[4];
31};
32
33struct drm_fbdev_cma {
34 struct drm_fb_helper fb_helper;
35 struct drm_fb_cma *fb;
36};
37
38static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
39{
40 return container_of(helper, struct drm_fbdev_cma, fb_helper);
41}
42
43static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
44{
45 return container_of(fb, struct drm_fb_cma, fb);
46}
47
48static void drm_fb_cma_destroy(struct drm_framebuffer *fb)
49{
50 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
51 int i;
52
53 for (i = 0; i < 4; i++) {
54 if (fb_cma->obj[i])
55 drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base);
56 }
57
58 drm_framebuffer_cleanup(fb);
59 kfree(fb_cma);
60}
61
62static int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
63 struct drm_file *file_priv, unsigned int *handle)
64{
65 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
66
67 return drm_gem_handle_create(file_priv,
68 &fb_cma->obj[0]->base, handle);
69}
70
71static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
72 .destroy = drm_fb_cma_destroy,
73 .create_handle = drm_fb_cma_create_handle,
74};
75
76static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
77 struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj,
78 unsigned int num_planes)
79{
80 struct drm_fb_cma *fb_cma;
81 int ret;
82 int i;
83
84 fb_cma = kzalloc(sizeof(*fb_cma), GFP_KERNEL);
85 if (!fb_cma)
86 return ERR_PTR(-ENOMEM);
87
88 ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
89 if (ret) {
 90 dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
91 kfree(fb_cma);
92 return ERR_PTR(ret);
93 }
94
95 drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
96
97 for (i = 0; i < num_planes; i++)
98 fb_cma->obj[i] = obj[i];
99
100 return fb_cma;
101}
102
103/**
104 * drm_fb_cma_create() - (struct drm_mode_config_funcs *)->fb_create callback function
105 *
106 * If your hardware has special alignment or pitch requirements these should be
107 * checked before calling this function.
108 */
109struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
110 struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd)
111{
112 struct drm_fb_cma *fb_cma;
113 struct drm_gem_cma_object *objs[4];
114 struct drm_gem_object *obj;
115 unsigned int hsub;
116 unsigned int vsub;
117 int ret;
118 int i;
119
120 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
121 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
122
123 for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
124 unsigned int width = mode_cmd->width / (i ? hsub : 1);
125 unsigned int height = mode_cmd->height / (i ? vsub : 1);
126 unsigned int min_size;
127
128 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]);
129 if (!obj) {
130 dev_err(dev->dev, "Failed to lookup GEM object\n");
131 ret = -ENXIO;
132 goto err_gem_object_unreference;
133 }
134
135 min_size = (height - 1) * mode_cmd->pitches[i]
136 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
137 + mode_cmd->offsets[i];
138
139 if (obj->size < min_size) {
140 drm_gem_object_unreference_unlocked(obj);
141 ret = -EINVAL;
142 goto err_gem_object_unreference;
143 }
144 objs[i] = to_drm_gem_cma_obj(obj);
145 }
146
147 fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i);
148 if (IS_ERR(fb_cma)) {
149 ret = PTR_ERR(fb_cma);
150 goto err_gem_object_unreference;
151 }
152
153 return &fb_cma->fb;
154
155err_gem_object_unreference:
156 for (i--; i >= 0; i--)
157 drm_gem_object_unreference_unlocked(&objs[i]->base);
158 return ERR_PTR(ret);
159}
160EXPORT_SYMBOL_GPL(drm_fb_cma_create);
161
162/**
163 * drm_fb_cma_get_gem_obj() - Get CMA GEM object for framebuffer
164 * @fb: The framebuffer
165 * @plane: Which plane
166 *
167 * Return the CMA GEM object for given framebuffer.
168 *
169 * This function will usually be called from the CRTC callback functions.
170 */
171struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
172 unsigned int plane)
173{
174 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
175
176 if (plane >= 4)
177 return NULL;
178
179 return fb_cma->obj[plane];
180}
181EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
182
183static struct fb_ops drm_fbdev_cma_ops = {
184 .owner = THIS_MODULE,
185 .fb_fillrect = sys_fillrect,
186 .fb_copyarea = sys_copyarea,
187 .fb_imageblit = sys_imageblit,
188 .fb_check_var = drm_fb_helper_check_var,
189 .fb_set_par = drm_fb_helper_set_par,
190 .fb_blank = drm_fb_helper_blank,
191 .fb_pan_display = drm_fb_helper_pan_display,
192 .fb_setcmap = drm_fb_helper_setcmap,
193};
194
195static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
196 struct drm_fb_helper_surface_size *sizes)
197{
198 struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
199 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
200 struct drm_device *dev = helper->dev;
201 struct drm_gem_cma_object *obj;
202 struct drm_framebuffer *fb;
203 unsigned int bytes_per_pixel;
204 unsigned long offset;
205 struct fb_info *fbi;
206 size_t size;
207 int ret;
208
 209 DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
210 sizes->surface_width, sizes->surface_height,
211 sizes->surface_bpp);
212
213 bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
214
215 mode_cmd.width = sizes->surface_width;
216 mode_cmd.height = sizes->surface_height;
217 mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
218 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
219 sizes->surface_depth);
220
221 size = mode_cmd.pitches[0] * mode_cmd.height;
222 obj = drm_gem_cma_create(dev, size);
223 if (!obj)
224 return -ENOMEM;
225
226 fbi = framebuffer_alloc(0, dev->dev);
227 if (!fbi) {
228 dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
229 ret = -ENOMEM;
230 goto err_drm_gem_cma_free_object;
231 }
232
233 fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
234 if (IS_ERR(fbdev_cma->fb)) {
235 dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
236 ret = PTR_ERR(fbdev_cma->fb);
237 goto err_framebuffer_release;
238 }
239
240 fb = &fbdev_cma->fb->fb;
241 helper->fb = fb;
242 helper->fbdev = fbi;
243
244 fbi->par = helper;
245 fbi->flags = FBINFO_FLAG_DEFAULT;
246 fbi->fbops = &drm_fbdev_cma_ops;
247
248 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
249 if (ret) {
250 dev_err(dev->dev, "Failed to allocate color map.\n");
251 goto err_drm_fb_cma_destroy;
252 }
253
254 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
255 drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
256
257 offset = fbi->var.xoffset * bytes_per_pixel;
258 offset += fbi->var.yoffset * fb->pitches[0];
259
260 dev->mode_config.fb_base = (resource_size_t)obj->paddr;
261 fbi->screen_base = obj->vaddr + offset;
262 fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
263 fbi->screen_size = size;
264 fbi->fix.smem_len = size;
265
266 return 0;
267
268err_drm_fb_cma_destroy:
269 drm_fb_cma_destroy(fb);
270err_framebuffer_release:
271 framebuffer_release(fbi);
272err_drm_gem_cma_free_object:
273 drm_gem_cma_free_object(&obj->base);
274 return ret;
275}
276
277static int drm_fbdev_cma_probe(struct drm_fb_helper *helper,
278 struct drm_fb_helper_surface_size *sizes)
279{
280 int ret = 0;
281
282 if (!helper->fb) {
283 ret = drm_fbdev_cma_create(helper, sizes);
284 if (ret < 0)
285 return ret;
286 ret = 1;
287 }
288
289 return ret;
290}
291
292static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
293 .fb_probe = drm_fbdev_cma_probe,
294};
295
296/**
 297 * drm_fbdev_cma_init() - Allocates and initializes a drm_fbdev_cma struct
298 * @dev: DRM device
299 * @preferred_bpp: Preferred bits per pixel for the device
300 * @num_crtc: Number of CRTCs
301 * @max_conn_count: Maximum number of connectors
302 *
 303 * Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
304 */
305struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
306 unsigned int preferred_bpp, unsigned int num_crtc,
307 unsigned int max_conn_count)
308{
309 struct drm_fbdev_cma *fbdev_cma;
310 struct drm_fb_helper *helper;
311 int ret;
312
313 fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
314 if (!fbdev_cma) {
315 dev_err(dev->dev, "Failed to allocate drm fbdev.\n");
316 return ERR_PTR(-ENOMEM);
317 }
318
319 fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs;
320 helper = &fbdev_cma->fb_helper;
321
322 ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
323 if (ret < 0) {
324 dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
325 goto err_free;
326 }
327
328 ret = drm_fb_helper_single_add_all_connectors(helper);
329 if (ret < 0) {
330 dev_err(dev->dev, "Failed to add connectors.\n");
331 goto err_drm_fb_helper_fini;
332
333 }
334
335 ret = drm_fb_helper_initial_config(helper, preferred_bpp);
336 if (ret < 0) {
 337 dev_err(dev->dev, "Failed to set initial hw configuration.\n");
338 goto err_drm_fb_helper_fini;
339 }
340
341 return fbdev_cma;
342
343err_drm_fb_helper_fini:
344 drm_fb_helper_fini(helper);
345err_free:
346 kfree(fbdev_cma);
347
348 return ERR_PTR(ret);
349}
350EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
351
352/**
353 * drm_fbdev_cma_fini() - Free drm_fbdev_cma struct
354 * @fbdev_cma: The drm_fbdev_cma struct
355 */
356void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
357{
358 if (fbdev_cma->fb_helper.fbdev) {
359 struct fb_info *info;
360 int ret;
361
362 info = fbdev_cma->fb_helper.fbdev;
363 ret = unregister_framebuffer(info);
364 if (ret < 0)
 365 DRM_DEBUG_KMS("unregister_framebuffer() failed\n");
366
367 if (info->cmap.len)
368 fb_dealloc_cmap(&info->cmap);
369
370 framebuffer_release(info);
371 }
372
373 if (fbdev_cma->fb)
374 drm_fb_cma_destroy(&fbdev_cma->fb->fb);
375
376 drm_fb_helper_fini(&fbdev_cma->fb_helper);
377 kfree(fbdev_cma);
378}
379EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
380
381/**
382 * drm_fbdev_cma_restore_mode() - Restores initial framebuffer mode
383 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
384 *
 385 * This function is usually called from the DRM driver's lastclose callback.
386 */
387void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
388{
389 if (fbdev_cma)
390 drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
391}
392EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
393
394/**
 395 * drm_fbdev_cma_hotplug_event() - Forward a hotplug event to the fbdev emulation
396 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
397 *
 398 * This function is usually called from the DRM driver's output_poll_changed
399 * callback.
400 */
401void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
402{
403 if (fbdev_cma)
404 drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
405}
406EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
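
A driver adopting the new fbdev helper typically creates the emulation at load time and forwards two mode-config events to it. A minimal sketch of that wiring (the example_ names and the driver-private struct are assumptions, not part of this patch):

	/* Sketch: typical driver wiring for the CMA fbdev helpers. */
	struct example_priv {
		struct drm_fbdev_cma *fbdev;	/* hypothetical driver private */
	};

	static int example_load(struct drm_device *dev, unsigned long flags)
	{
		struct example_priv *priv = dev->dev_private;

		priv->fbdev = drm_fbdev_cma_init(dev, 16 /* preferred bpp */,
						 dev->mode_config.num_crtc,
						 dev->mode_config.num_connector);
		if (IS_ERR(priv->fbdev))
			return PTR_ERR(priv->fbdev);
		return 0;
	}

	static void example_lastclose(struct drm_device *dev)
	{
		struct example_priv *priv = dev->dev_private;

		drm_fbdev_cma_restore_mode(priv->fbdev);
	}

	static void example_output_poll_changed(struct drm_device *dev)
	{
		struct example_priv *priv = dev->dev_private;

		drm_fbdev_cma_hotplug_event(priv->fbdev);
	}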
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index dde5c345e75f..4d58d7e6af3f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -236,7 +236,7 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
236} 236}
237EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode); 237EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
238 238
239bool drm_fb_helper_force_kernel_mode(void) 239static bool drm_fb_helper_force_kernel_mode(void)
240{ 240{
241 bool ret, error = false; 241 bool ret, error = false;
242 struct drm_fb_helper *helper; 242 struct drm_fb_helper *helper;
@@ -330,7 +330,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
330 /* Walk the connectors & encoders on this fb turning them on/off */ 330 /* Walk the connectors & encoders on this fb turning them on/off */
331 for (j = 0; j < fb_helper->connector_count; j++) { 331 for (j = 0; j < fb_helper->connector_count; j++) {
332 connector = fb_helper->connector_info[j]->connector; 332 connector = fb_helper->connector_info[j]->connector;
333 drm_helper_connector_dpms(connector, dpms_mode); 333 connector->funcs->dpms(connector, dpms_mode);
334 drm_connector_property_set_value(connector, 334 drm_connector_property_set_value(connector,
335 dev->mode_config.dpms_property, dpms_mode); 335 dev->mode_config.dpms_property, dpms_mode);
336 } 336 }
@@ -1230,7 +1230,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1230 struct drm_device *dev = fb_helper->dev; 1230 struct drm_device *dev = fb_helper->dev;
1231 struct drm_fb_helper_crtc **crtcs; 1231 struct drm_fb_helper_crtc **crtcs;
1232 struct drm_display_mode **modes; 1232 struct drm_display_mode **modes;
1233 struct drm_encoder *encoder;
1234 struct drm_mode_set *modeset; 1233 struct drm_mode_set *modeset;
1235 bool *enabled; 1234 bool *enabled;
1236 int width, height; 1235 int width, height;
@@ -1241,11 +1240,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1241 width = dev->mode_config.max_width; 1240 width = dev->mode_config.max_width;
1242 height = dev->mode_config.max_height; 1241 height = dev->mode_config.max_height;
1243 1242
1244 /* clean out all the encoder/crtc combos */
1245 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1246 encoder->crtc = NULL;
1247 }
1248
1249 crtcs = kcalloc(dev->mode_config.num_connector, 1243 crtcs = kcalloc(dev->mode_config.num_connector,
1250 sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); 1244 sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
1251 modes = kcalloc(dev->mode_config.num_connector, 1245 modes = kcalloc(dev->mode_config.num_connector,
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
new file mode 100644
index 000000000000..1aa8fee1e865
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -0,0 +1,251 @@
1/*
2 * drm gem CMA (contiguous memory allocator) helper functions
3 *
4 * Copyright (C) 2012 Sascha Hauer, Pengutronix
5 *
6 * Based on Samsung Exynos code
7 *
8 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#include <linux/mm.h>
21#include <linux/slab.h>
22#include <linux/mutex.h>
23#include <linux/export.h>
24#include <linux/dma-mapping.h>
25
26#include <drm/drmP.h>
27#include <drm/drm.h>
28#include <drm/drm_gem_cma_helper.h>
29
30static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
31{
32 return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
33}
34
35static void drm_gem_cma_buf_destroy(struct drm_device *drm,
36 struct drm_gem_cma_object *cma_obj)
37{
38 dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr,
39 cma_obj->paddr);
40}
41
42/*
43 * drm_gem_cma_create - allocate an object with the given size
44 *
45 * returns a struct drm_gem_cma_object* on success or ERR_PTR values
46 * on failure.
47 */
48struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
49 unsigned int size)
50{
51 struct drm_gem_cma_object *cma_obj;
52 struct drm_gem_object *gem_obj;
53 int ret;
54
55 size = round_up(size, PAGE_SIZE);
56
57 cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
58 if (!cma_obj)
59 return ERR_PTR(-ENOMEM);
60
61 cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
62 &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
63 if (!cma_obj->vaddr) {
64 dev_err(drm->dev, "failed to allocate buffer with size %d\n", size);
65 ret = -ENOMEM;
66 goto err_dma_alloc;
67 }
68
69 gem_obj = &cma_obj->base;
70
71 ret = drm_gem_object_init(drm, gem_obj, size);
72 if (ret)
73 goto err_obj_init;
74
75 ret = drm_gem_create_mmap_offset(gem_obj);
76 if (ret)
77 goto err_create_mmap_offset;
78
79 return cma_obj;
80
81err_create_mmap_offset:
82 drm_gem_object_release(gem_obj);
83
84err_obj_init:
85 drm_gem_cma_buf_destroy(drm, cma_obj);
86
87err_dma_alloc:
88 kfree(cma_obj);
89
90 return ERR_PTR(ret);
91}
92EXPORT_SYMBOL_GPL(drm_gem_cma_create);
93
94/*
95 * drm_gem_cma_create_with_handle - allocate an object with the given
96 * size and create a gem handle on it
97 *
98 * returns a struct drm_gem_cma_object* on success or ERR_PTR values
99 * on failure.
100 */
101static struct drm_gem_cma_object *drm_gem_cma_create_with_handle(
102 struct drm_file *file_priv,
103 struct drm_device *drm, unsigned int size,
104 unsigned int *handle)
105{
106 struct drm_gem_cma_object *cma_obj;
107 struct drm_gem_object *gem_obj;
108 int ret;
109
110 cma_obj = drm_gem_cma_create(drm, size);
111 if (IS_ERR(cma_obj))
112 return cma_obj;
113
114 gem_obj = &cma_obj->base;
115
116 /*
117 * allocate a id of idr table where the obj is registered
118 * and handle has the id what user can see.
119 */
120 ret = drm_gem_handle_create(file_priv, gem_obj, handle);
121 if (ret)
122 goto err_handle_create;
123
124 /* drop reference from allocate - handle holds it now. */
125 drm_gem_object_unreference_unlocked(gem_obj);
126
127 return cma_obj;
128
129err_handle_create:
130 drm_gem_cma_free_object(gem_obj);
131
132 return ERR_PTR(ret);
133}
134
135/*
136 * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
137 * function
138 */
139void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
140{
141 struct drm_gem_cma_object *cma_obj;
142
143 if (gem_obj->map_list.map)
144 drm_gem_free_mmap_offset(gem_obj);
145
146 drm_gem_object_release(gem_obj);
147
148 cma_obj = to_drm_gem_cma_obj(gem_obj);
149
150 drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);
151
152 kfree(cma_obj);
153}
154EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
155
156/*
157 * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
158 * function
159 *
 160 * This aligns the pitch and size arguments to the minimum required. Wrap
 161 * this in your own function if you need larger alignment.
162 */
163int drm_gem_cma_dumb_create(struct drm_file *file_priv,
164 struct drm_device *dev, struct drm_mode_create_dumb *args)
165{
166 struct drm_gem_cma_object *cma_obj;
167 int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
168
169 if (args->pitch < min_pitch)
170 args->pitch = min_pitch;
171
172 if (args->size < args->pitch * args->height)
173 args->size = args->pitch * args->height;
174
175 cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
176 args->size, &args->handle);
177 if (IS_ERR(cma_obj))
178 return PTR_ERR(cma_obj);
179
180 return 0;
181}
182EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
183
184/*
185 * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
186 * function
187 */
188int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
189 struct drm_device *drm, uint32_t handle, uint64_t *offset)
190{
191 struct drm_gem_object *gem_obj;
192
193 mutex_lock(&drm->struct_mutex);
194
195 gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
196 if (!gem_obj) {
197 dev_err(drm->dev, "failed to lookup gem object\n");
198 mutex_unlock(&drm->struct_mutex);
199 return -EINVAL;
200 }
201
202 *offset = get_gem_mmap_offset(gem_obj);
203
204 drm_gem_object_unreference(gem_obj);
205
206 mutex_unlock(&drm->struct_mutex);
207
208 return 0;
209}
210EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
211
212const struct vm_operations_struct drm_gem_cma_vm_ops = {
213 .open = drm_gem_vm_open,
214 .close = drm_gem_vm_close,
215};
216EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
217
218/*
219 * drm_gem_cma_mmap - (struct file_operation)->mmap callback function
220 */
221int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
222{
223 struct drm_gem_object *gem_obj;
224 struct drm_gem_cma_object *cma_obj;
225 int ret;
226
227 ret = drm_gem_mmap(filp, vma);
228 if (ret)
229 return ret;
230
231 gem_obj = vma->vm_private_data;
232 cma_obj = to_drm_gem_cma_obj(gem_obj);
233
234 ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr >> PAGE_SHIFT,
235 vma->vm_end - vma->vm_start, vma->vm_page_prot);
236 if (ret)
237 drm_gem_vm_close(vma);
238
239 return ret;
240}
241EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
242
243/*
244 * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
245 */
246int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
247 struct drm_device *drm, unsigned int handle)
248{
249 return drm_gem_handle_delete(file_priv, handle);
250}
251EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
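
Hooking the GEM CMA helpers into a driver is mostly a matter of plugging the exported callbacks into struct drm_driver and the mmap file operation. A minimal sketch showing only the fields these helpers serve (the example_ names are illustrative):

	/* Sketch: drm_driver fields served by the GEM CMA helpers. */
	static const struct file_operations example_fops = {
		.owner		= THIS_MODULE,
		.open		= drm_open,
		.release	= drm_release,
		.unlocked_ioctl	= drm_ioctl,
		.mmap		= drm_gem_cma_mmap,	/* CMA-backed mapping */
	};

	static struct drm_driver example_driver = {
		.driver_features	= DRIVER_GEM | DRIVER_MODESET,
		.gem_free_object	= drm_gem_cma_free_object,
		.gem_vm_ops		= &drm_gem_cma_vm_ops,
		.dumb_create		= drm_gem_cma_dumb_create,
		.dumb_map_offset	= drm_gem_cma_dumb_map_offset,
		.dumb_destroy		= drm_gem_cma_dumb_destroy,
		.fops			= &example_fops,
		/* name/desc/date and KMS callbacks omitted from this sketch */
	};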
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 09975ba1a8f7..3a3d0ce891b9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1236,7 +1236,7 @@ done:
1236 return ret; 1236 return ret;
1237} 1237}
1238 1238
1239void drm_handle_vblank_events(struct drm_device *dev, int crtc) 1239static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
1240{ 1240{
1241 struct drm_pending_vblank_event *e, *t; 1241 struct drm_pending_vblank_event *e, *t;
1242 struct timeval now; 1242 struct timeval now;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 85a8fa6e09fe..23a824e6a22a 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -62,7 +62,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
62 tmp = pgprot_writecombine(tmp); 62 tmp = pgprot_writecombine(tmp);
63 else 63 else
64 tmp = pgprot_noncached(tmp); 64 tmp = pgprot_noncached(tmp);
65#elif defined(__sparc__) || defined(__arm__) 65#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
66 tmp = pgprot_noncached(tmp); 66 tmp = pgprot_noncached(tmp);
67#endif 67#endif
68 return tmp; 68 return tmp;
@@ -619,20 +619,11 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
619 offset = drm_core_get_reg_ofs(dev); 619 offset = drm_core_get_reg_ofs(dev);
620 vma->vm_flags |= VM_IO; /* not in core dump */ 620 vma->vm_flags |= VM_IO; /* not in core dump */
621 vma->vm_page_prot = drm_io_prot(map->type, vma); 621 vma->vm_page_prot = drm_io_prot(map->type, vma);
622#if !defined(__arm__)
623 if (io_remap_pfn_range(vma, vma->vm_start, 622 if (io_remap_pfn_range(vma, vma->vm_start,
624 (map->offset + offset) >> PAGE_SHIFT, 623 (map->offset + offset) >> PAGE_SHIFT,
625 vma->vm_end - vma->vm_start, 624 vma->vm_end - vma->vm_start,
626 vma->vm_page_prot)) 625 vma->vm_page_prot))
627 return -EAGAIN; 626 return -EAGAIN;
628#else
629 if (remap_pfn_range(vma, vma->vm_start,
630 (map->offset + offset) >> PAGE_SHIFT,
631 vma->vm_end - vma->vm_start,
632 vma->vm_page_prot))
633 return -EAGAIN;
634#endif
635
636 DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," 627 DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
637 " offset = 0x%llx\n", 628 " offset = 0x%llx\n",
638 map->type, 629 map->type,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index ad01d3a09c11..c2b1b1441ed0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -147,9 +147,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
147 147
148 drm_mode_connector_update_edid_property(connector, edid); 148 drm_mode_connector_update_edid_property(connector, edid);
149 count = drm_add_edid_modes(connector, edid); 149 count = drm_add_edid_modes(connector, edid);
150 150 kfree(edid);
151 kfree(connector->display_info.raw_edid);
152 connector->display_info.raw_edid = edid;
153 } else { 151 } else {
154 struct drm_display_mode *mode = drm_mode_create(connector->dev); 152 struct drm_display_mode *mode = drm_mode_create(connector->dev);
155 struct exynos_drm_panel_info *panel; 153 struct exynos_drm_panel_info *panel;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index be879c079346..bd4ff6348239 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -266,8 +266,8 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
266 /* release drm framebuffer and real buffer */ 266 /* release drm framebuffer and real buffer */
267 if (fb_helper->fb && fb_helper->fb->funcs) { 267 if (fb_helper->fb && fb_helper->fb->funcs) {
268 fb = fb_helper->fb; 268 fb = fb_helper->fb;
269 if (fb && fb->funcs->destroy) 269 if (fb)
270 fb->funcs->destroy(fb); 270 drm_framebuffer_remove(fb);
271 } 271 }
272 272
273 /* release linux framebuffer */ 273 /* release linux framebuffer */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 3e933c911017..8fe431ae537b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -102,7 +102,6 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
102 u8 *edid, int len) 102 u8 *edid, int len)
103{ 103{
104 struct vidi_context *ctx = get_vidi_context(dev); 104 struct vidi_context *ctx = get_vidi_context(dev);
105 struct edid *raw_edid;
106 105
107 DRM_DEBUG_KMS("%s\n", __FILE__); 106 DRM_DEBUG_KMS("%s\n", __FILE__);
108 107
@@ -115,18 +114,6 @@ static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
115 return -EFAULT; 114 return -EFAULT;
116 } 115 }
117 116
118 raw_edid = kzalloc(len, GFP_KERNEL);
119 if (!raw_edid) {
120 DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
121 return -ENOMEM;
122 }
123
124 memcpy(raw_edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
125 * EDID_LENGTH, len));
126
127 /* attach the edid data to connector. */
128 connector->display_info.raw_edid = (char *)raw_edid;
129
130 memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions) 117 memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
131 * EDID_LENGTH, len)); 118 * EDID_LENGTH, len));
132 119
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index abfa2a93f0d0..7a2d40a5c1e1 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -3,7 +3,7 @@
3# 3#
4ccflags-y += -I$(srctree)/include/drm 4ccflags-y += -I$(srctree)/include/drm
5 5
6gma500_gfx-y += gem_glue.o \ 6gma500_gfx-y += \
7 accel_2d.o \ 7 accel_2d.o \
8 backlight.o \ 8 backlight.o \
9 framebuffer.o \ 9 framebuffer.o \
@@ -30,7 +30,8 @@ gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
30 cdv_intel_crt.o \ 30 cdv_intel_crt.o \
31 cdv_intel_display.o \ 31 cdv_intel_display.o \
32 cdv_intel_hdmi.o \ 32 cdv_intel_hdmi.o \
33 cdv_intel_lvds.o 33 cdv_intel_lvds.o \
34 cdv_intel_dp.o
34 35
35gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \ 36gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
36 oaktrail_crtc.o \ 37 oaktrail_crtc.o \
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 20793951fcac..143eba3309c5 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -26,10 +26,55 @@
26#include "intel_bios.h" 26#include "intel_bios.h"
27#include "power.h" 27#include "power.h"
28 28
29static void do_gma_backlight_set(struct drm_device *dev)
30{
31#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
32 struct drm_psb_private *dev_priv = dev->dev_private;
33 backlight_update_status(dev_priv->backlight_device);
34#endif
35}
36
37void gma_backlight_enable(struct drm_device *dev)
38{
39#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
40 struct drm_psb_private *dev_priv = dev->dev_private;
41 dev_priv->backlight_enabled = true;
42 if (dev_priv->backlight_device) {
43 dev_priv->backlight_device->props.brightness = dev_priv->backlight_level;
44 do_gma_backlight_set(dev);
45 }
46#endif
47}
48
49void gma_backlight_disable(struct drm_device *dev)
50{
51#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
52 struct drm_psb_private *dev_priv = dev->dev_private;
53 dev_priv->backlight_enabled = false;
54 if (dev_priv->backlight_device) {
55 dev_priv->backlight_device->props.brightness = 0;
56 do_gma_backlight_set(dev);
57 }
58#endif
59}
60
61void gma_backlight_set(struct drm_device *dev, int v)
62{
63#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
64 struct drm_psb_private *dev_priv = dev->dev_private;
65 dev_priv->backlight_level = v;
66 if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
67 dev_priv->backlight_device->props.brightness = v;
68 do_gma_backlight_set(dev);
69 }
70#endif
71}
72
29int gma_backlight_init(struct drm_device *dev) 73int gma_backlight_init(struct drm_device *dev)
30{ 74{
31#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 75#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
32 struct drm_psb_private *dev_priv = dev->dev_private; 76 struct drm_psb_private *dev_priv = dev->dev_private;
77 dev_priv->backlight_enabled = true;
33 return dev_priv->ops->backlight_init(dev); 78 return dev_priv->ops->backlight_init(dev);
34#else 79#else
35 return 0; 80 return 0;
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 7db0e3bf5a5b..1ceca3d13b65 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -58,10 +58,17 @@ static int cdv_output_init(struct drm_device *dev)
58 cdv_intel_lvds_init(dev, &dev_priv->mode_dev); 58 cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
59 59
60 /* These bits indicate HDMI not SDVO on CDV */ 60 /* These bits indicate HDMI not SDVO on CDV */
61 if (REG_READ(SDVOB) & SDVO_DETECTED) 61 if (REG_READ(SDVOB) & SDVO_DETECTED) {
62 cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB); 62 cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
63 if (REG_READ(SDVOC) & SDVO_DETECTED) 63 if (REG_READ(DP_B) & DP_DETECTED)
64 cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_B);
65 }
66
67 if (REG_READ(SDVOC) & SDVO_DETECTED) {
64 cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC); 68 cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
69 if (REG_READ(DP_C) & DP_DETECTED)
70 cdv_intel_dp_init(dev, &dev_priv->mode_dev, DP_C);
71 }
65 return 0; 72 return 0;
66} 73}
67 74
@@ -163,6 +170,7 @@ static int cdv_backlight_init(struct drm_device *dev)
163 cdv_get_brightness(cdv_backlight_device); 170 cdv_get_brightness(cdv_backlight_device);
164 backlight_update_status(cdv_backlight_device); 171 backlight_update_status(cdv_backlight_device);
165 dev_priv->backlight_device = cdv_backlight_device; 172 dev_priv->backlight_device = cdv_backlight_device;
173 dev_priv->backlight_enabled = true;
166 return 0; 174 return 0;
167} 175}
168 176
@@ -449,6 +457,7 @@ static void cdv_get_core_freq(struct drm_device *dev)
449 case 6: 457 case 6:
450 case 7: 458 case 7:
451 dev_priv->core_freq = 266; 459 dev_priv->core_freq = 266;
460 break;
452 default: 461 default:
453 dev_priv->core_freq = 0; 462 dev_priv->core_freq = 0;
454 } 463 }
@@ -488,6 +497,65 @@ static void cdv_hotplug_enable(struct drm_device *dev, bool on)
488 } 497 }
489} 498}
490 499
500static const char *force_audio_names[] = {
501 "off",
502 "auto",
503 "on",
504};
505
506void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
507{
508 struct drm_device *dev = connector->dev;
509 struct drm_psb_private *dev_priv = dev->dev_private;
510 struct drm_property *prop;
511 int i;
512
513 prop = dev_priv->force_audio_property;
514 if (prop == NULL) {
515 prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
516 "audio",
517 ARRAY_SIZE(force_audio_names));
518 if (prop == NULL)
519 return;
520
521 for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
522 drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
523
524 dev_priv->force_audio_property = prop;
525 }
526 drm_connector_attach_property(connector, prop, 0);
527}
528
529
530static const char *broadcast_rgb_names[] = {
531 "Full",
532 "Limited 16:235",
533};
534
535void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
536{
537 struct drm_device *dev = connector->dev;
538 struct drm_psb_private *dev_priv = dev->dev_private;
539 struct drm_property *prop;
540 int i;
541
542 prop = dev_priv->broadcast_rgb_property;
543 if (prop == NULL) {
544 prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
545 "Broadcast RGB",
546 ARRAY_SIZE(broadcast_rgb_names));
547 if (prop == NULL)
548 return;
549
550 for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
551 drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
552
553 dev_priv->broadcast_rgb_property = prop;
554 }
555
556 drm_connector_attach_property(connector, prop, 0);
557}
558
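
Both helpers above follow the same lazily-created enum-property pattern: build the property once per device, cache the pointer in dev_priv, then attach it to every connector that wants it. Note that for the "audio" property the enum value passed to drm_property_add_enum() is i - 1, so the three names map to -1 ("off"), 0 ("auto") and 1 ("on"). A minimal self-contained sketch of that pattern using the same 3.x-era DRM calls; example_names, example_attach_property and the cached-pointer argument are hypothetical names, not part of the patch:

    /* Sketch of the legacy (pre-atomic) enum-property pattern used above. */
    static const char *example_names[] = { "off", "auto", "on" };

    static void example_attach_property(struct drm_connector *connector,
                                        struct drm_property **cached)
    {
            struct drm_device *dev = connector->dev;
            struct drm_property *prop = *cached;
            int i;

            if (!prop) {
                    prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
                                               "example",
                                               ARRAY_SIZE(example_names));
                    if (!prop)
                            return;
                    /* enum value i - 1 yields -1/0/1 for off/auto/on */
                    for (i = 0; i < ARRAY_SIZE(example_names); i++)
                            drm_property_add_enum(prop, i, i - 1,
                                                  example_names[i]);
                    *cached = prop; /* created once, reused per connector */
            }
            drm_connector_attach_property(connector, prop, 0);
    }
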
491/* Cedarview */ 559/* Cedarview */
492static const struct psb_offset cdv_regmap[2] = { 560static const struct psb_offset cdv_regmap[2] = {
493 { 561 {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index a68509ba22a8..3cfd0931fbfb 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -57,15 +57,26 @@ struct cdv_intel_clock_t {
57struct cdv_intel_limit_t { 57struct cdv_intel_limit_t {
58 struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1; 58 struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
59 struct cdv_intel_p2_t p2; 59 struct cdv_intel_p2_t p2;
60 bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *,
61 int, int, struct cdv_intel_clock_t *);
60}; 62};
61 63
64static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
65 struct drm_crtc *crtc, int target, int refclk,
66 struct cdv_intel_clock_t *best_clock);
67static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
68 int refclk,
69 struct cdv_intel_clock_t *best_clock);
70
62#define CDV_LIMIT_SINGLE_LVDS_96 0 71#define CDV_LIMIT_SINGLE_LVDS_96 0
63#define CDV_LIMIT_SINGLE_LVDS_100 1 72#define CDV_LIMIT_SINGLE_LVDS_100 1
64#define CDV_LIMIT_DAC_HDMI_27 2 73#define CDV_LIMIT_DAC_HDMI_27 2
65#define CDV_LIMIT_DAC_HDMI_96 3 74#define CDV_LIMIT_DAC_HDMI_96 3
75#define CDV_LIMIT_DP_27 4
76#define CDV_LIMIT_DP_100 5
66 77
67static const struct cdv_intel_limit_t cdv_intel_limits[] = { 78static const struct cdv_intel_limit_t cdv_intel_limits[] = {
68 { /* CDV_SIGNLE_LVDS_96MHz */ 79 { /* CDV_SINGLE_LVDS_96MHz */
69 .dot = {.min = 20000, .max = 115500}, 80 .dot = {.min = 20000, .max = 115500},
70 .vco = {.min = 1800000, .max = 3600000}, 81 .vco = {.min = 1800000, .max = 3600000},
71 .n = {.min = 2, .max = 6}, 82 .n = {.min = 2, .max = 6},
@@ -76,6 +87,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
76 .p1 = {.min = 2, .max = 10}, 87 .p1 = {.min = 2, .max = 10},
77 .p2 = {.dot_limit = 200000, 88 .p2 = {.dot_limit = 200000,
78 .p2_slow = 14, .p2_fast = 14}, 89 .p2_slow = 14, .p2_fast = 14},
90 .find_pll = cdv_intel_find_best_PLL,
79 }, 91 },
80 { /* CDV_SINGLE_LVDS_100MHz */ 92 { /* CDV_SINGLE_LVDS_100MHz */
81 .dot = {.min = 20000, .max = 115500}, 93 .dot = {.min = 20000, .max = 115500},
@@ -90,6 +102,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
90 * is 80-224Mhz. Prefer single channel as much as possible. 102 * is 80-224Mhz. Prefer single channel as much as possible.
91 */ 103 */
92 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14}, 104 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
105 .find_pll = cdv_intel_find_best_PLL,
93 }, 106 },
94 { /* CDV_DAC_HDMI_27MHz */ 107 { /* CDV_DAC_HDMI_27MHz */
95 .dot = {.min = 20000, .max = 400000}, 108 .dot = {.min = 20000, .max = 400000},
@@ -101,6 +114,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
101 .p = {.min = 5, .max = 90}, 114 .p = {.min = 5, .max = 90},
102 .p1 = {.min = 1, .max = 9}, 115 .p1 = {.min = 1, .max = 9},
103 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, 116 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
117 .find_pll = cdv_intel_find_best_PLL,
104 }, 118 },
105 { /* CDV_DAC_HDMI_96MHz */ 119 { /* CDV_DAC_HDMI_96MHz */
106 .dot = {.min = 20000, .max = 400000}, 120 .dot = {.min = 20000, .max = 400000},
@@ -112,7 +126,32 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
112 .p = {.min = 5, .max = 100}, 126 .p = {.min = 5, .max = 100},
113 .p1 = {.min = 1, .max = 10}, 127 .p1 = {.min = 1, .max = 10},
114 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5}, 128 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
129 .find_pll = cdv_intel_find_best_PLL,
130 },
131 { /* CDV_DP_27MHz */
132 .dot = {.min = 160000, .max = 272000},
133 .vco = {.min = 1809000, .max = 3564000},
134 .n = {.min = 1, .max = 1},
135 .m = {.min = 67, .max = 132},
136 .m1 = {.min = 0, .max = 0},
137 .m2 = {.min = 65, .max = 130},
138 .p = {.min = 5, .max = 90},
139 .p1 = {.min = 1, .max = 9},
140 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
141 .find_pll = cdv_intel_find_dp_pll,
115 }, 142 },
143 { /* CDV_DP_100MHz */
144 .dot = {.min = 160000, .max = 272000},
145 .vco = {.min = 1800000, .max = 3600000},
146 .n = {.min = 2, .max = 6},
147 .m = {.min = 60, .max = 164},
148 .m1 = {.min = 0, .max = 0},
149 .m2 = {.min = 58, .max = 162},
150 .p = {.min = 5, .max = 100},
151 .p1 = {.min = 1, .max = 10},
152 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 10},
153 .find_pll = cdv_intel_find_dp_pll,
154 }
116}; 155};
117 156
118#define _wait_for(COND, MS, W) ({ \ 157#define _wait_for(COND, MS, W) ({ \
@@ -132,7 +171,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
132#define wait_for(COND, MS) _wait_for(COND, MS, 1) 171#define wait_for(COND, MS) _wait_for(COND, MS, 1)
133 172
134 173
135static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val) 174int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
136{ 175{
137 int ret; 176 int ret;
138 177
@@ -159,7 +198,7 @@ static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
159 return 0; 198 return 0;
160} 199}
161 200
162static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val) 201int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
163{ 202{
164 int ret; 203 int ret;
165 static bool dpio_debug = true; 204 static bool dpio_debug = true;
@@ -201,7 +240,7 @@ static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
201/* Reset the DPIO configuration register. The BIOS does this at every 240/* Reset the DPIO configuration register. The BIOS does this at every
202 * mode set. 241 * mode set.
203 */ 242 */
204static void cdv_sb_reset(struct drm_device *dev) 243void cdv_sb_reset(struct drm_device *dev)
205{ 244{
206 245
207 REG_WRITE(DPIO_CFG, 0); 246 REG_WRITE(DPIO_CFG, 0);
@@ -216,7 +255,7 @@ static void cdv_sb_reset(struct drm_device *dev)
216 */ 255 */
217static int 256static int
218cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc, 257cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
219 struct cdv_intel_clock_t *clock, bool is_lvds) 258 struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select)
220{ 259{
221 struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc); 260 struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
222 int pipe = psb_crtc->pipe; 261 int pipe = psb_crtc->pipe;
@@ -259,7 +298,7 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
259 ref_value &= ~(REF_CLK_MASK); 298 ref_value &= ~(REF_CLK_MASK);
260 299
261 /* use DPLL_A for pipeB on CRT/HDMI */ 300 /* use DPLL_A for pipeB on CRT/HDMI */
262 if (pipe == 1 && !is_lvds) { 301 if (pipe == 1 && !is_lvds && !(ddi_select & DP_MASK)) {
263 DRM_DEBUG_KMS("use DPLLA for pipe B\n"); 302 DRM_DEBUG_KMS("use DPLLA for pipe B\n");
264 ref_value |= REF_CLK_DPLLA; 303 ref_value |= REF_CLK_DPLLA;
265 } else { 304 } else {
@@ -336,30 +375,33 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
336 if (ret) 375 if (ret)
337 return ret; 376 return ret;
338 377
339 lane_reg = PSB_LANE0; 378 if (ddi_select) {
340 cdv_sb_read(dev, lane_reg, &lane_value); 379 if ((ddi_select & DDI_MASK) == DDI0_SELECT) {
341 lane_value &= ~(LANE_PLL_MASK); 380 lane_reg = PSB_LANE0;
342 lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); 381 cdv_sb_read(dev, lane_reg, &lane_value);
343 cdv_sb_write(dev, lane_reg, lane_value); 382 lane_value &= ~(LANE_PLL_MASK);
344 383 lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
345 lane_reg = PSB_LANE1; 384 cdv_sb_write(dev, lane_reg, lane_value);
346 cdv_sb_read(dev, lane_reg, &lane_value); 385
347 lane_value &= ~(LANE_PLL_MASK); 386 lane_reg = PSB_LANE1;
348 lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); 387 cdv_sb_read(dev, lane_reg, &lane_value);
349 cdv_sb_write(dev, lane_reg, lane_value); 388 lane_value &= ~(LANE_PLL_MASK);
350 389 lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
351 lane_reg = PSB_LANE2; 390 cdv_sb_write(dev, lane_reg, lane_value);
352 cdv_sb_read(dev, lane_reg, &lane_value); 391 } else {
353 lane_value &= ~(LANE_PLL_MASK); 392 lane_reg = PSB_LANE2;
354 lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); 393 cdv_sb_read(dev, lane_reg, &lane_value);
355 cdv_sb_write(dev, lane_reg, lane_value); 394 lane_value &= ~(LANE_PLL_MASK);
356 395 lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
357 lane_reg = PSB_LANE3; 396 cdv_sb_write(dev, lane_reg, lane_value);
358 cdv_sb_read(dev, lane_reg, &lane_value); 397
359 lane_value &= ~(LANE_PLL_MASK); 398 lane_reg = PSB_LANE3;
360 lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe); 399 cdv_sb_read(dev, lane_reg, &lane_value);
361 cdv_sb_write(dev, lane_reg, lane_value); 400 lane_value &= ~(LANE_PLL_MASK);
362 401 lane_value |= LANE_PLL_ENABLE | LANE_PLL_PIPE(pipe);
402 cdv_sb_write(dev, lane_reg, lane_value);
403 }
404 }
363 return 0; 405 return 0;
364} 406}
365 407
@@ -396,6 +438,12 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
396 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96]; 438 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
397 else 439 else
398 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100]; 440 limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
441 } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
442 psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
443 if (refclk == 27000)
444 limit = &cdv_intel_limits[CDV_LIMIT_DP_27];
445 else
446 limit = &cdv_intel_limits[CDV_LIMIT_DP_100];
399 } else { 447 } else {
400 if (refclk == 27000) 448 if (refclk == 27000)
401 limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27]; 449 limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
@@ -438,13 +486,12 @@ static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
438 return true; 486 return true;
439} 487}
440 488
441static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target, 489static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
442 int refclk, 490 struct drm_crtc *crtc, int target, int refclk,
443 struct cdv_intel_clock_t *best_clock) 491 struct cdv_intel_clock_t *best_clock)
444{ 492{
445 struct drm_device *dev = crtc->dev; 493 struct drm_device *dev = crtc->dev;
446 struct cdv_intel_clock_t clock; 494 struct cdv_intel_clock_t clock;
447 const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
448 int err = target; 495 int err = target;
449 496
450 497
@@ -498,6 +545,49 @@ static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
498 return err != target; 545 return err != target;
499} 546}
500 547
548static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
549 int refclk,
550 struct cdv_intel_clock_t *best_clock)
551{
552 struct cdv_intel_clock_t clock;
553 if (refclk == 27000) {
554 if (target < 200000) {
555 clock.p1 = 2;
556 clock.p2 = 10;
557 clock.n = 1;
558 clock.m1 = 0;
559 clock.m2 = 118;
560 } else {
561 clock.p1 = 1;
562 clock.p2 = 10;
563 clock.n = 1;
564 clock.m1 = 0;
565 clock.m2 = 98;
566 }
567 } else if (refclk == 100000) {
568 if (target < 200000) {
569 clock.p1 = 2;
570 clock.p2 = 10;
571 clock.n = 5;
572 clock.m1 = 0;
573 clock.m2 = 160;
574 } else {
575 clock.p1 = 1;
576 clock.p2 = 10;
577 clock.n = 5;
578 clock.m1 = 0;
579 clock.m2 = 133;
580 }
581 } else
582 return false;
583 clock.m = clock.m2 + 2;
584 clock.p = clock.p1 * clock.p2;
585 clock.vco = (refclk * clock.m) / clock.n;
586 clock.dot = clock.vco / clock.p;
587 memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t));
588 return true;
589}
590
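
The hard-coded tables in cdv_intel_find_dp_pll() can be sanity-checked with the identities applied at the end of the function (m = m2 + 2, p = p1 * p2, vco = refclk * m / n, dot = vco / p). A worked check for the 27MHz reference, keeping all clocks in kHz as elsewhere in this file:

    /* 27MHz table, target < 200000 row: m2 = 118, n = 1, p1 = 2, p2 = 10 */
    int refclk = 27000;
    int m = 118 + 2, n = 1, p = 2 * 10;
    int vco = (refclk * m) / n;     /* 27000 * 120 = 3240000 */
    int dot = vco / p;              /* 3240000 / 20 = 162000 */

    /* target >= 200000 row: m2 = 98, p1 = 1 */
    m = 98 + 2; p = 1 * 10;
    vco = (refclk * m) / n;         /* 27000 * 100 = 2700000 */
    dot = vco / p;                  /* 2700000 / 10 = 270000 */

Both rows land exactly on the two DP link clocks (162000 and 270000 kHz, i.e. the 1.62 and 2.7 Gbps link rates divided by ten), which is why no iterative search is needed for DP.
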
501static int cdv_intel_pipe_set_base(struct drm_crtc *crtc, 591static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
502 int x, int y, struct drm_framebuffer *old_fb) 592 int x, int y, struct drm_framebuffer *old_fb)
503{ 593{
@@ -791,7 +881,7 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
791 case DRM_MODE_DPMS_STANDBY: 881 case DRM_MODE_DPMS_STANDBY:
792 case DRM_MODE_DPMS_SUSPEND: 882 case DRM_MODE_DPMS_SUSPEND:
793 if (psb_intel_crtc->active) 883 if (psb_intel_crtc->active)
794 return; 884 break;
795 885
796 psb_intel_crtc->active = true; 886 psb_intel_crtc->active = true;
797 887
@@ -835,17 +925,15 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
835 REG_WRITE(map->status, temp); 925 REG_WRITE(map->status, temp);
836 REG_READ(map->status); 926 REG_READ(map->status);
837 927
838 cdv_intel_update_watermark(dev, crtc);
839 cdv_intel_crtc_load_lut(crtc); 928 cdv_intel_crtc_load_lut(crtc);
840 929
841 /* Give the overlay scaler a chance to enable 930 /* Give the overlay scaler a chance to enable
842 * if it's on this pipe */ 931 * if it's on this pipe */
843 /* psb_intel_crtc_dpms_video(crtc, true); TODO */ 932 /* psb_intel_crtc_dpms_video(crtc, true); TODO */
844 psb_intel_crtc->crtc_enable = true;
845 break; 933 break;
846 case DRM_MODE_DPMS_OFF: 934 case DRM_MODE_DPMS_OFF:
847 if (!psb_intel_crtc->active) 935 if (!psb_intel_crtc->active)
848 return; 936 break;
849 937
850 psb_intel_crtc->active = false; 938 psb_intel_crtc->active = false;
851 939
@@ -892,10 +980,9 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
892 980
893 /* Wait for the clocks to turn off. */ 981 /* Wait for the clocks to turn off. */
894 udelay(150); 982 udelay(150);
895 cdv_intel_update_watermark(dev, crtc);
896 psb_intel_crtc->crtc_enable = false;
897 break; 983 break;
898 } 984 }
985 cdv_intel_update_watermark(dev, crtc);
899 /*Set FIFO Watermarks*/ 986 /*Set FIFO Watermarks*/
900 REG_WRITE(DSPARB, 0x3F3E); 987 REG_WRITE(DSPARB, 0x3F3E);
901} 988}
@@ -952,9 +1039,12 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
952 u32 dpll = 0, dspcntr, pipeconf; 1039 u32 dpll = 0, dspcntr, pipeconf;
953 bool ok; 1040 bool ok;
954 bool is_crt = false, is_lvds = false, is_tv = false; 1041 bool is_crt = false, is_lvds = false, is_tv = false;
955 bool is_hdmi = false; 1042 bool is_hdmi = false, is_dp = false;
956 struct drm_mode_config *mode_config = &dev->mode_config; 1043 struct drm_mode_config *mode_config = &dev->mode_config;
957 struct drm_connector *connector; 1044 struct drm_connector *connector;
1045 const struct cdv_intel_limit_t *limit;
1046 u32 ddi_select = 0;
1047 bool is_edp = false;
958 1048
959 list_for_each_entry(connector, &mode_config->connector_list, head) { 1049 list_for_each_entry(connector, &mode_config->connector_list, head) {
960 struct psb_intel_encoder *psb_intel_encoder = 1050 struct psb_intel_encoder *psb_intel_encoder =
@@ -964,6 +1054,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
964 || connector->encoder->crtc != crtc) 1054 || connector->encoder->crtc != crtc)
965 continue; 1055 continue;
966 1056
1057 ddi_select = psb_intel_encoder->ddi_select;
967 switch (psb_intel_encoder->type) { 1058 switch (psb_intel_encoder->type) {
968 case INTEL_OUTPUT_LVDS: 1059 case INTEL_OUTPUT_LVDS:
969 is_lvds = true; 1060 is_lvds = true;
@@ -977,6 +1068,15 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
977 case INTEL_OUTPUT_HDMI: 1068 case INTEL_OUTPUT_HDMI:
978 is_hdmi = true; 1069 is_hdmi = true;
979 break; 1070 break;
1071 case INTEL_OUTPUT_DISPLAYPORT:
1072 is_dp = true;
1073 break;
1074 case INTEL_OUTPUT_EDP:
1075 is_edp = true;
1076 break;
1077 default:
1078 DRM_ERROR("invalid output type.\n");
1079 return 0;
980 } 1080 }
981 } 1081 }
982 1082
@@ -986,6 +1086,20 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
986 else 1086 else
987 /* high-end sku, 27/100 mhz */ 1087 /* high-end sku, 27/100 mhz */
988 refclk = 27000; 1088 refclk = 27000;
1089 if (is_dp || is_edp) {
1090 /*
 1091 * Based on the spec, the low-end SKU has only CRT/LVDS, so it is
 1092 * unnecessary to consider it for DP/eDP.
 1093 * The high-end SKU uses the 27/100MHz reference clock for
 1094 * DP/eDP. When using the SSC clock, the ref clk is 100MHz; otherwise
 1095 * it is 27MHz. From the VBIOS code it seems that pipe A chooses
 1096 * 27MHz for DP/eDP while pipe B chooses 100MHz.
1097 */
1098 if (pipe == 0)
1099 refclk = 27000;
1100 else
1101 refclk = 100000;
1102 }
989 1103
990 if (is_lvds && dev_priv->lvds_use_ssc) { 1104 if (is_lvds && dev_priv->lvds_use_ssc) {
991 refclk = dev_priv->lvds_ssc_freq * 1000; 1105 refclk = dev_priv->lvds_ssc_freq * 1000;
@@ -993,8 +1107,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
993 } 1107 }
994 1108
995 drm_mode_debug_printmodeline(adjusted_mode); 1109 drm_mode_debug_printmodeline(adjusted_mode);
1110
1111 limit = cdv_intel_limit(crtc, refclk);
996 1112
997 ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, 1113 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
998 &clock); 1114 &clock);
999 if (!ok) { 1115 if (!ok) {
1000 dev_err(dev->dev, "Couldn't find PLL settings for mode!\n"); 1116 dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
@@ -1009,6 +1125,15 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1009 } 1125 }
1010/* dpll |= PLL_REF_INPUT_DREFCLK; */ 1126/* dpll |= PLL_REF_INPUT_DREFCLK; */
1011 1127
1128 if (is_dp || is_edp) {
1129 cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode);
1130 } else {
1131 REG_WRITE(PIPE_GMCH_DATA_M(pipe), 0);
1132 REG_WRITE(PIPE_GMCH_DATA_N(pipe), 0);
1133 REG_WRITE(PIPE_DP_LINK_M(pipe), 0);
1134 REG_WRITE(PIPE_DP_LINK_N(pipe), 0);
1135 }
1136
1012 dpll |= DPLL_SYNCLOCK_ENABLE; 1137 dpll |= DPLL_SYNCLOCK_ENABLE;
1013/* if (is_lvds) 1138/* if (is_lvds)
1014 dpll |= DPLLB_MODE_LVDS; 1139 dpll |= DPLLB_MODE_LVDS;
@@ -1019,6 +1144,31 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1019 /* setup pipeconf */ 1144 /* setup pipeconf */
1020 pipeconf = REG_READ(map->conf); 1145 pipeconf = REG_READ(map->conf);
1021 1146
1147 pipeconf &= ~(PIPE_BPC_MASK);
1148 if (is_edp) {
1149 switch (dev_priv->edp.bpp) {
1150 case 24:
1151 pipeconf |= PIPE_8BPC;
1152 break;
1153 case 18:
1154 pipeconf |= PIPE_6BPC;
1155 break;
1156 case 30:
1157 pipeconf |= PIPE_10BPC;
1158 break;
1159 default:
1160 pipeconf |= PIPE_8BPC;
1161 break;
1162 }
1163 } else if (is_lvds) {
1164 /* the BPC will be 6 if it is 18-bit LVDS panel */
1165 if ((REG_READ(LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
1166 pipeconf |= PIPE_8BPC;
1167 else
1168 pipeconf |= PIPE_6BPC;
1169 } else
1170 pipeconf |= PIPE_8BPC;
1171
1022 /* Set up the display plane register */ 1172 /* Set up the display plane register */
1023 dspcntr = DISPPLANE_GAMMA_ENABLE; 1173 dspcntr = DISPPLANE_GAMMA_ENABLE;
1024 1174
@@ -1033,7 +1183,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
1033 REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE); 1183 REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
1034 REG_READ(map->dpll); 1184 REG_READ(map->dpll);
1035 1185
1036 cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds); 1186 cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds, ddi_select);
1037 1187
1038 udelay(150); 1188 udelay(150);
1039 1189
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
new file mode 100644
index 000000000000..e3a3978cf320
--- /dev/null
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -0,0 +1,1950 @@
1/*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
29#include <linux/slab.h>
30#include <drm/drmP.h>
31#include <drm/drm_crtc.h>
32#include <drm/drm_crtc_helper.h>
33#include "psb_drv.h"
34#include "psb_intel_drv.h"
35#include "psb_intel_reg.h"
36#include <drm/drm_dp_helper.h>
37
38#define _wait_for(COND, MS, W) ({ \
39 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
40 int ret__ = 0; \
41 while (! (COND)) { \
42 if (time_after(jiffies, timeout__)) { \
43 ret__ = -ETIMEDOUT; \
44 break; \
45 } \
46 if (W && !in_dbg_master()) msleep(W); \
47 } \
48 ret__; \
49})
50
51#define wait_for(COND, MS) _wait_for(COND, MS, 1)
52
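
wait_for() polls COND for up to MS milliseconds, sleeping about 1ms between checks, and evaluates to 0 on success or -ETIMEDOUT on timeout. A usage sketch in the style of the panel-power code later in this file:

    /* Poll until the panel-power-on bit comes up, with a 1 second timeout. */
    if (wait_for((REG_READ(PP_STATUS) & PP_ON) != 0, 1000))
            DRM_DEBUG_KMS("eDP panel failed to power up\n");
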
53#define DP_LINK_STATUS_SIZE 6
54#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
55
56#define DP_LINK_CONFIGURATION_SIZE 9
57
58#define CDV_FAST_LINK_TRAIN 1
59
60struct cdv_intel_dp {
61 uint32_t output_reg;
62 uint32_t DP;
63 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
64 bool has_audio;
65 int force_audio;
66 uint32_t color_range;
67 uint8_t link_bw;
68 uint8_t lane_count;
69 uint8_t dpcd[4];
70 struct psb_intel_encoder *encoder;
71 struct i2c_adapter adapter;
72 struct i2c_algo_dp_aux_data algo;
73 uint8_t train_set[4];
74 uint8_t link_status[DP_LINK_STATUS_SIZE];
75 int panel_power_up_delay;
76 int panel_power_down_delay;
77 int panel_power_cycle_delay;
78 int backlight_on_delay;
79 int backlight_off_delay;
80 struct drm_display_mode *panel_fixed_mode; /* for eDP */
81 bool panel_on;
82};
83
84struct ddi_regoff {
85 uint32_t PreEmph1;
86 uint32_t PreEmph2;
87 uint32_t VSwing1;
88 uint32_t VSwing2;
89 uint32_t VSwing3;
90 uint32_t VSwing4;
91 uint32_t VSwing5;
92};
93
94static struct ddi_regoff ddi_DP_train_table[] = {
95 {.PreEmph1 = 0x812c, .PreEmph2 = 0x8124, .VSwing1 = 0x8154,
96 .VSwing2 = 0x8148, .VSwing3 = 0x814C, .VSwing4 = 0x8150,
97 .VSwing5 = 0x8158,},
98 {.PreEmph1 = 0x822c, .PreEmph2 = 0x8224, .VSwing1 = 0x8254,
99 .VSwing2 = 0x8248, .VSwing3 = 0x824C, .VSwing4 = 0x8250,
100 .VSwing5 = 0x8258,},
101};
102
103static uint32_t dp_vswing_premph_table[] = {
104 0x55338954, 0x4000,
105 0x554d8954, 0x2000,
106 0x55668954, 0,
107 0x559ac0d4, 0x6000,
108};
109/**
 110 * is_edp - is the given encoder attached to an eDP panel
 111 * @encoder: GMA encoder to check
 112 *
 113 * If the encoder's DP output is attached to an eDP panel, this function
 114 * returns true, and false otherwise.
115 */
116static bool is_edp(struct psb_intel_encoder *encoder)
117{
118 return encoder->type == INTEL_OUTPUT_EDP;
119}
120
121
122static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder);
123static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder);
124static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder);
125
126static int
127cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
128{
129 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
130 int max_lane_count = 4;
131
132 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
133 max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
134 switch (max_lane_count) {
135 case 1: case 2: case 4:
136 break;
137 default:
138 max_lane_count = 4;
139 }
140 }
141 return max_lane_count;
142}
143
144static int
145cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder)
146{
147 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
148 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
149
150 switch (max_link_bw) {
151 case DP_LINK_BW_1_62:
152 case DP_LINK_BW_2_7:
153 break;
154 default:
155 max_link_bw = DP_LINK_BW_1_62;
156 break;
157 }
158 return max_link_bw;
159}
160
161static int
162cdv_intel_dp_link_clock(uint8_t link_bw)
163{
164 if (link_bw == DP_LINK_BW_2_7)
165 return 270000;
166 else
167 return 162000;
168}
169
170static int
171cdv_intel_dp_link_required(int pixel_clock, int bpp)
172{
173 return (pixel_clock * bpp + 7) / 8;
174}
175
176static int
177cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
178{
179 return (max_link_clock * max_lanes * 19) / 20;
180}
181
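
Together these helpers implement the bandwidth check used by mode_valid and mode_fixup below: cdv_intel_dp_link_required() converts a pixel clock and bpp into a payload rate, and cdv_intel_dp_max_data_rate() scales the raw link rate by 19/20 as the overhead margin this driver allows. A worked example, assuming a common 1920x1080@60 mode (pixel clock 148500 kHz):

    /* 1920x1080@60 at 24 bpp on a 2.7 Gbps x4 link */
    int required = cdv_intel_dp_link_required(148500, 24);
                                    /* (148500 * 24 + 7) / 8 = 445500 */
    int avail = cdv_intel_dp_max_data_rate(270000, 4);
                                    /* (270000 * 4 * 19) / 20 = 1026000 */
    /* required <= avail, so the mode passes the bandwidth check */
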
182static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
183{
184 struct drm_device *dev = intel_encoder->base.dev;
185 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
186 u32 pp;
187
188 if (intel_dp->panel_on) {
189 DRM_DEBUG_KMS("Skip VDD on because of panel on\n");
190 return;
191 }
192 DRM_DEBUG_KMS("\n");
193
194 pp = REG_READ(PP_CONTROL);
195
196 pp |= EDP_FORCE_VDD;
197 REG_WRITE(PP_CONTROL, pp);
198 REG_READ(PP_CONTROL);
199 msleep(intel_dp->panel_power_up_delay);
200}
201
202static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
203{
204 struct drm_device *dev = intel_encoder->base.dev;
205 u32 pp;
206
207 DRM_DEBUG_KMS("\n");
208 pp = REG_READ(PP_CONTROL);
209
210 pp &= ~EDP_FORCE_VDD;
211 REG_WRITE(PP_CONTROL, pp);
212 REG_READ(PP_CONTROL);
213
214}
215
216/* Returns true if the panel was already on when called */
217static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
218{
219 struct drm_device *dev = intel_encoder->base.dev;
220 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
221 u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_NONE;
222
223 if (intel_dp->panel_on)
224 return true;
225
226 DRM_DEBUG_KMS("\n");
227 pp = REG_READ(PP_CONTROL);
228 pp &= ~PANEL_UNLOCK_MASK;
229
230 pp |= (PANEL_UNLOCK_REGS | POWER_TARGET_ON);
231 REG_WRITE(PP_CONTROL, pp);
232 REG_READ(PP_CONTROL);
233
234 if (wait_for(((REG_READ(PP_STATUS) & idle_on_mask) == idle_on_mask), 1000)) {
235 DRM_DEBUG_KMS("Error in Powering up eDP panel, status %x\n", REG_READ(PP_STATUS));
236 intel_dp->panel_on = false;
237 } else
238 intel_dp->panel_on = true;
239 msleep(intel_dp->panel_power_up_delay);
240
241 return false;
242}
243
244static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder)
245{
246 struct drm_device *dev = intel_encoder->base.dev;
247 u32 pp, idle_off_mask = PP_ON ;
248 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
249
250 DRM_DEBUG_KMS("\n");
251
252 pp = REG_READ(PP_CONTROL);
253
254 if ((pp & POWER_TARGET_ON) == 0)
255 return;
256
257 intel_dp->panel_on = false;
258 pp &= ~PANEL_UNLOCK_MASK;
259 /* ILK workaround: disable reset around power sequence */
260
261 pp &= ~POWER_TARGET_ON;
262 pp &= ~EDP_FORCE_VDD;
263 pp &= ~EDP_BLC_ENABLE;
264 REG_WRITE(PP_CONTROL, pp);
265 REG_READ(PP_CONTROL);
266 DRM_DEBUG_KMS("PP_STATUS %x\n", REG_READ(PP_STATUS));
267
268 if (wait_for((REG_READ(PP_STATUS) & idle_off_mask) == 0, 1000)) {
269 DRM_DEBUG_KMS("Error in turning off Panel\n");
270 }
271
272 msleep(intel_dp->panel_power_cycle_delay);
273 DRM_DEBUG_KMS("Over\n");
274}
275
276static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder)
277{
278 struct drm_device *dev = intel_encoder->base.dev;
279 u32 pp;
280
281 DRM_DEBUG_KMS("\n");
282 /*
283 * If we enable the backlight right away following a panel power
284 * on, we may see slight flicker as the panel syncs with the eDP
285 * link. So delay a bit to make sure the image is solid before
286 * allowing it to appear.
287 */
288 msleep(300);
289 pp = REG_READ(PP_CONTROL);
290
291 pp |= EDP_BLC_ENABLE;
292 REG_WRITE(PP_CONTROL, pp);
293 gma_backlight_enable(dev);
294}
295
296static void cdv_intel_edp_backlight_off (struct psb_intel_encoder *intel_encoder)
297{
298 struct drm_device *dev = intel_encoder->base.dev;
299 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
300 u32 pp;
301
302 DRM_DEBUG_KMS("\n");
303 gma_backlight_disable(dev);
304 msleep(10);
305 pp = REG_READ(PP_CONTROL);
306
307 pp &= ~EDP_BLC_ENABLE;
308 REG_WRITE(PP_CONTROL, pp);
309 msleep(intel_dp->backlight_off_delay);
310}
311
312static int
313cdv_intel_dp_mode_valid(struct drm_connector *connector,
314 struct drm_display_mode *mode)
315{
316 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
317 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
318 int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
319 int max_lanes = cdv_intel_dp_max_lane_count(encoder);
320 struct drm_psb_private *dev_priv = connector->dev->dev_private;
321
322 if (is_edp(encoder) && intel_dp->panel_fixed_mode) {
323 if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
324 return MODE_PANEL;
325 if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
326 return MODE_PANEL;
327 }
328
 329 /* only refuse the mode on non-eDP, since we have seen some weird eDP panels
 330 which are outside spec tolerances but somehow work by magic */
331 if (!is_edp(encoder) &&
332 (cdv_intel_dp_link_required(mode->clock, dev_priv->edp.bpp)
333 > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes)))
334 return MODE_CLOCK_HIGH;
335
336 if (is_edp(encoder)) {
337 if (cdv_intel_dp_link_required(mode->clock, 24)
338 > cdv_intel_dp_max_data_rate(max_link_clock, max_lanes))
339 return MODE_CLOCK_HIGH;
340
341 }
342 if (mode->clock < 10000)
343 return MODE_CLOCK_LOW;
344
345 return MODE_OK;
346}
347
348static uint32_t
349pack_aux(uint8_t *src, int src_bytes)
350{
351 int i;
352 uint32_t v = 0;
353
354 if (src_bytes > 4)
355 src_bytes = 4;
356 for (i = 0; i < src_bytes; i++)
357 v |= ((uint32_t) src[i]) << ((3-i) * 8);
358 return v;
359}
360
361static void
362unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
363{
364 int i;
365 if (dst_bytes > 4)
366 dst_bytes = 4;
367 for (i = 0; i < dst_bytes; i++)
368 dst[i] = src >> ((3-i) * 8);
369}
370
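
pack_aux()/unpack_aux() marshal up to four message bytes into and out of the 32-bit AUX data registers, most significant byte first. A quick sketch with arbitrary byte values:

    uint8_t msg[2] = { 0x12, 0x34 };
    uint32_t v = pack_aux(msg, 2);  /* 0x12340000: msg[0] lands in bits 31:24 */

    uint8_t out[2];
    unpack_aux(0x12340000, out, 2); /* out[0] = 0x12, out[1] = 0x34 */
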
371static int
372cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
373 uint8_t *send, int send_bytes,
374 uint8_t *recv, int recv_size)
375{
376 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
377 uint32_t output_reg = intel_dp->output_reg;
378 struct drm_device *dev = encoder->base.dev;
379 uint32_t ch_ctl = output_reg + 0x10;
380 uint32_t ch_data = ch_ctl + 4;
381 int i;
382 int recv_bytes;
383 uint32_t status;
384 uint32_t aux_clock_divider;
385 int try, precharge;
386
 387 /* The clock divider is based off the hrawclk, and the AUX
 388 * channel wants to run at 2MHz. So take the hrawclk value
 389 * (in MHz) and divide by 2 to get the divider.
 390 * The CDV platform uses a 200MHz hrawclk.
 391 *
 392 */
393 aux_clock_divider = 200 / 2;
394
395 precharge = 4;
396 if (is_edp(encoder))
397 precharge = 10;
398
399 if (REG_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) {
400 DRM_ERROR("dp_aux_ch not started status 0x%08x\n",
401 REG_READ(ch_ctl));
402 return -EBUSY;
403 }
404
405 /* Must try at least 3 times according to DP spec */
406 for (try = 0; try < 5; try++) {
407 /* Load the send data into the aux channel data registers */
408 for (i = 0; i < send_bytes; i += 4)
409 REG_WRITE(ch_data + i,
410 pack_aux(send + i, send_bytes - i));
411
412 /* Send the command and wait for it to complete */
413 REG_WRITE(ch_ctl,
414 DP_AUX_CH_CTL_SEND_BUSY |
415 DP_AUX_CH_CTL_TIME_OUT_400us |
416 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
417 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
418 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
419 DP_AUX_CH_CTL_DONE |
420 DP_AUX_CH_CTL_TIME_OUT_ERROR |
421 DP_AUX_CH_CTL_RECEIVE_ERROR);
422 for (;;) {
423 status = REG_READ(ch_ctl);
424 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
425 break;
426 udelay(100);
427 }
428
429 /* Clear done status and any errors */
430 REG_WRITE(ch_ctl,
431 status |
432 DP_AUX_CH_CTL_DONE |
433 DP_AUX_CH_CTL_TIME_OUT_ERROR |
434 DP_AUX_CH_CTL_RECEIVE_ERROR);
435 if (status & DP_AUX_CH_CTL_DONE)
436 break;
437 }
438
439 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
440 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
441 return -EBUSY;
442 }
443
444 /* Check for timeout or receive error.
445 * Timeouts occur when the sink is not connected
446 */
447 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
448 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
449 return -EIO;
450 }
451
452 /* Timeouts occur when the device isn't connected, so they're
453 * "normal" -- don't fill the kernel log with these */
454 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
455 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
456 return -ETIMEDOUT;
457 }
458
459 /* Unload any bytes sent back from the other side */
460 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
461 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
462 if (recv_bytes > recv_size)
463 recv_bytes = recv_size;
464
465 for (i = 0; i < recv_bytes; i += 4)
466 unpack_aux(REG_READ(ch_data + i),
467 recv + i, recv_bytes - i);
468
469 return recv_bytes;
470}
471
472/* Write data to the aux channel in native mode */
473static int
474cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
475 uint16_t address, uint8_t *send, int send_bytes)
476{
477 int ret;
478 uint8_t msg[20];
479 int msg_bytes;
480 uint8_t ack;
481
482 if (send_bytes > 16)
483 return -1;
484 msg[0] = AUX_NATIVE_WRITE << 4;
485 msg[1] = address >> 8;
486 msg[2] = address & 0xff;
487 msg[3] = send_bytes - 1;
488 memcpy(&msg[4], send, send_bytes);
489 msg_bytes = send_bytes + 4;
490 for (;;) {
491 ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes, &ack, 1);
492 if (ret < 0)
493 return ret;
494 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
495 break;
496 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
497 udelay(100);
498 else
499 return -EIO;
500 }
501 return send_bytes;
502}
503
504/* Write a single byte to the aux channel in native mode */
505static int
506cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
507 uint16_t address, uint8_t byte)
508{
509 return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1);
510}
511
512/* read bytes from a native aux channel */
513static int
514cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder,
515 uint16_t address, uint8_t *recv, int recv_bytes)
516{
517 uint8_t msg[4];
518 int msg_bytes;
519 uint8_t reply[20];
520 int reply_bytes;
521 uint8_t ack;
522 int ret;
523
524 msg[0] = AUX_NATIVE_READ << 4;
525 msg[1] = address >> 8;
526 msg[2] = address & 0xff;
527 msg[3] = recv_bytes - 1;
528
529 msg_bytes = 4;
530 reply_bytes = recv_bytes + 1;
531
532 for (;;) {
533 ret = cdv_intel_dp_aux_ch(encoder, msg, msg_bytes,
534 reply, reply_bytes);
535 if (ret == 0)
536 return -EPROTO;
537 if (ret < 0)
538 return ret;
539 ack = reply[0];
540 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
541 memcpy(recv, reply + 1, ret - 1);
542 return ret - 1;
543 }
544 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
545 udelay(100);
546 else
547 return -EIO;
548 }
549}
550
551static int
552cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
553 uint8_t write_byte, uint8_t *read_byte)
554{
555 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
556 struct cdv_intel_dp *intel_dp = container_of(adapter,
557 struct cdv_intel_dp,
558 adapter);
559 struct psb_intel_encoder *encoder = intel_dp->encoder;
560 uint16_t address = algo_data->address;
561 uint8_t msg[5];
562 uint8_t reply[2];
563 unsigned retry;
564 int msg_bytes;
565 int reply_bytes;
566 int ret;
567
568 /* Set up the command byte */
569 if (mode & MODE_I2C_READ)
570 msg[0] = AUX_I2C_READ << 4;
571 else
572 msg[0] = AUX_I2C_WRITE << 4;
573
574 if (!(mode & MODE_I2C_STOP))
575 msg[0] |= AUX_I2C_MOT << 4;
576
577 msg[1] = address >> 8;
578 msg[2] = address;
579
580 switch (mode) {
581 case MODE_I2C_WRITE:
582 msg[3] = 0;
583 msg[4] = write_byte;
584 msg_bytes = 5;
585 reply_bytes = 1;
586 break;
587 case MODE_I2C_READ:
588 msg[3] = 0;
589 msg_bytes = 4;
590 reply_bytes = 2;
591 break;
592 default:
593 msg_bytes = 3;
594 reply_bytes = 1;
595 break;
596 }
597
598 for (retry = 0; retry < 5; retry++) {
599 ret = cdv_intel_dp_aux_ch(encoder,
600 msg, msg_bytes,
601 reply, reply_bytes);
602 if (ret < 0) {
603 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
604 return ret;
605 }
606
607 switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
608 case AUX_NATIVE_REPLY_ACK:
609 /* I2C-over-AUX Reply field is only valid
610 * when paired with AUX ACK.
611 */
612 break;
613 case AUX_NATIVE_REPLY_NACK:
614 DRM_DEBUG_KMS("aux_ch native nack\n");
615 return -EREMOTEIO;
616 case AUX_NATIVE_REPLY_DEFER:
617 udelay(100);
618 continue;
619 default:
620 DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
621 reply[0]);
622 return -EREMOTEIO;
623 }
624
625 switch (reply[0] & AUX_I2C_REPLY_MASK) {
626 case AUX_I2C_REPLY_ACK:
627 if (mode == MODE_I2C_READ) {
628 *read_byte = reply[1];
629 }
630 return reply_bytes - 1;
631 case AUX_I2C_REPLY_NACK:
632 DRM_DEBUG_KMS("aux_i2c nack\n");
633 return -EREMOTEIO;
634 case AUX_I2C_REPLY_DEFER:
635 DRM_DEBUG_KMS("aux_i2c defer\n");
636 udelay(100);
637 break;
638 default:
639 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
640 return -EREMOTEIO;
641 }
642 }
643
644 DRM_ERROR("too many retries, giving up\n");
645 return -EREMOTEIO;
646}
647
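
The function above tunnels one I2C byte at a time over the AUX channel: the high nibble of msg[0] selects read or write, and AUX_I2C_MOT ("middle of transaction") stays set until the caller passes MODE_I2C_STOP, so the sink keeps the I2C transaction open across calls. A sketch of the message bytes for a mid-transaction read, assuming algo_data->address has been set to 0x50 (the usual EDID slave address):

    uint8_t msg[4];
    msg[0] = (AUX_I2C_READ | AUX_I2C_MOT) << 4; /* read, transaction still open */
    msg[1] = 0x50 >> 8;                         /* address high byte (0) */
    msg[2] = 0x50;                              /* address low byte */
    msg[3] = 0;                                 /* length - 1: one data byte */
    /* reply[0] returns the ACK/NACK/DEFER status, reply[1] the data byte */
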
648static int
649cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name)
650{
651 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
652 int ret;
653
654 DRM_DEBUG_KMS("i2c_init %s\n", name);
655
656 intel_dp->algo.running = false;
657 intel_dp->algo.address = 0;
658 intel_dp->algo.aux_ch = cdv_intel_dp_i2c_aux_ch;
659
660 memset(&intel_dp->adapter, '\0', sizeof (intel_dp->adapter));
661 intel_dp->adapter.owner = THIS_MODULE;
662 intel_dp->adapter.class = I2C_CLASS_DDC;
663 strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
664 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
665 intel_dp->adapter.algo_data = &intel_dp->algo;
666 intel_dp->adapter.dev.parent = &connector->base.kdev;
667
668 if (is_edp(encoder))
669 cdv_intel_edp_panel_vdd_on(encoder);
670 ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
671 if (is_edp(encoder))
672 cdv_intel_edp_panel_vdd_off(encoder);
673
674 return ret;
675}
676
677void cdv_intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
678 struct drm_display_mode *adjusted_mode)
679{
680 adjusted_mode->hdisplay = fixed_mode->hdisplay;
681 adjusted_mode->hsync_start = fixed_mode->hsync_start;
682 adjusted_mode->hsync_end = fixed_mode->hsync_end;
683 adjusted_mode->htotal = fixed_mode->htotal;
684
685 adjusted_mode->vdisplay = fixed_mode->vdisplay;
686 adjusted_mode->vsync_start = fixed_mode->vsync_start;
687 adjusted_mode->vsync_end = fixed_mode->vsync_end;
688 adjusted_mode->vtotal = fixed_mode->vtotal;
689
690 adjusted_mode->clock = fixed_mode->clock;
691
692 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
693}
694
695static bool
696cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
697 struct drm_display_mode *adjusted_mode)
698{
699 struct drm_psb_private *dev_priv = encoder->dev->dev_private;
700 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
701 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
702 int lane_count, clock;
703 int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
704 int max_clock = cdv_intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
705 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
706 int refclock = mode->clock;
707 int bpp = 24;
708
709 if (is_edp(intel_encoder) && intel_dp->panel_fixed_mode) {
710 cdv_intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
711 refclock = intel_dp->panel_fixed_mode->clock;
712 bpp = dev_priv->edp.bpp;
713 }
714
715 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
716 for (clock = max_clock; clock >= 0; clock--) {
717 int link_avail = cdv_intel_dp_max_data_rate(cdv_intel_dp_link_clock(bws[clock]), lane_count);
718
719 if (cdv_intel_dp_link_required(refclock, bpp) <= link_avail) {
720 intel_dp->link_bw = bws[clock];
721 intel_dp->lane_count = lane_count;
722 adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
723 DRM_DEBUG_KMS("Display port link bw %02x lane "
724 "count %d clock %d\n",
725 intel_dp->link_bw, intel_dp->lane_count,
726 adjusted_mode->clock);
727 return true;
728 }
729 }
730 }
731 if (is_edp(intel_encoder)) {
732 /* okay we failed just pick the highest */
733 intel_dp->lane_count = max_lane_count;
734 intel_dp->link_bw = bws[max_clock];
735 adjusted_mode->clock = cdv_intel_dp_link_clock(intel_dp->link_bw);
736 DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
737 "count %d clock %d\n",
738 intel_dp->link_bw, intel_dp->lane_count,
739 adjusted_mode->clock);
740
741 return true;
742 }
743 return false;
744}
745
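
The search above prefers the smallest lane count that can carry the mode, trying the higher link rate first within each lane count; only for eDP does it fall back to the maximum lanes and rate when nothing fits. A worked trace for a 1920x1080@60 panel at 24 bpp, using the helper values computed earlier:

    /* required = cdv_intel_dp_link_required(148500, 24) = 445500
     *
     * lane_count = 1: max_data_rate(270000, 1) = 256500  -> too low
     *                 max_data_rate(162000, 1) = 153900  -> too low
     * lane_count = 2: max_data_rate(270000, 2) = 513000  -> fits
     *
     * Result: link_bw = DP_LINK_BW_2_7, lane_count = 2.
     */
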
746struct cdv_intel_dp_m_n {
747 uint32_t tu;
748 uint32_t gmch_m;
749 uint32_t gmch_n;
750 uint32_t link_m;
751 uint32_t link_n;
752};
753
754static void
755cdv_intel_reduce_ratio(uint32_t *num, uint32_t *den)
756{
757 /*
758 while (*num > 0xffffff || *den > 0xffffff) {
759 *num >>= 1;
760 *den >>= 1;
761 }*/
762 uint64_t value, m;
763 m = *num;
764 value = m * (0x800000);
765 m = do_div(value, *den);
766 *num = value;
767 *den = 0x800000;
768}
769
770static void
771cdv_intel_dp_compute_m_n(int bpp,
772 int nlanes,
773 int pixel_clock,
774 int link_clock,
775 struct cdv_intel_dp_m_n *m_n)
776{
777 m_n->tu = 64;
778 m_n->gmch_m = (pixel_clock * bpp + 7) >> 3;
779 m_n->gmch_n = link_clock * nlanes;
780 cdv_intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
781 m_n->link_m = pixel_clock;
782 m_n->link_n = link_clock;
783 cdv_intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
784}
785
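
Rather than shifting both terms down, cdv_intel_reduce_ratio() rescales the ratio onto a fixed 0x800000 (2^23) denominator, so the M/N registers always receive a full-precision fraction. A worked data-M/N example, assuming 1920x1080@60 (pixel clock 148500 kHz) at 24 bpp over four lanes at the 270000 kHz link clock:

    uint32_t gmch_m = (148500 * 24 + 7) >> 3;   /* 445500 */
    uint32_t gmch_n = 270000 * 4;               /* 1080000 */
    cdv_intel_reduce_ratio(&gmch_m, &gmch_n);
    /* gmch_n = 0x800000 (8388608) and
     * gmch_m = 445500 * 8388608 / 1080000 = 3460300 (truncated),
     * the same 0.4125 ratio expressed over a 2^23 denominator.
     */
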
786void
787cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
788 struct drm_display_mode *adjusted_mode)
789{
790 struct drm_device *dev = crtc->dev;
791 struct drm_psb_private *dev_priv = dev->dev_private;
792 struct drm_mode_config *mode_config = &dev->mode_config;
793 struct drm_encoder *encoder;
794 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
795 int lane_count = 4, bpp = 24;
796 struct cdv_intel_dp_m_n m_n;
797 int pipe = intel_crtc->pipe;
798
799 /*
800 * Find the lane count in the intel_encoder private
801 */
802 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
803 struct psb_intel_encoder *intel_encoder;
804 struct cdv_intel_dp *intel_dp;
805
806 if (encoder->crtc != crtc)
807 continue;
808
809 intel_encoder = to_psb_intel_encoder(encoder);
810 intel_dp = intel_encoder->dev_priv;
811 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
812 lane_count = intel_dp->lane_count;
813 break;
814 } else if (is_edp(intel_encoder)) {
815 lane_count = intel_dp->lane_count;
816 bpp = dev_priv->edp.bpp;
817 break;
818 }
819 }
820
821 /*
 822 * Compute the GMCH and Link ratios. The bpp here is the
 823 * number of bits per pixel post-LUT, which we always set
 824 * up for 8 bits of R/G/B (24 bpp) unless an eDP panel overrides it.
825 */
826 cdv_intel_dp_compute_m_n(bpp, lane_count,
827 mode->clock, adjusted_mode->clock, &m_n);
828
829 {
830 REG_WRITE(PIPE_GMCH_DATA_M(pipe),
831 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
832 m_n.gmch_m);
833 REG_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
834 REG_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
835 REG_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
836 }
837}
838
839static void
840cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
841 struct drm_display_mode *adjusted_mode)
842{
843 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
844 struct drm_crtc *crtc = encoder->crtc;
845 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
846 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
847 struct drm_device *dev = encoder->dev;
848
849 intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
850 intel_dp->DP |= intel_dp->color_range;
851
852 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
853 intel_dp->DP |= DP_SYNC_HS_HIGH;
854 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
855 intel_dp->DP |= DP_SYNC_VS_HIGH;
856
857 intel_dp->DP |= DP_LINK_TRAIN_OFF;
858
859 switch (intel_dp->lane_count) {
860 case 1:
861 intel_dp->DP |= DP_PORT_WIDTH_1;
862 break;
863 case 2:
864 intel_dp->DP |= DP_PORT_WIDTH_2;
865 break;
866 case 4:
867 intel_dp->DP |= DP_PORT_WIDTH_4;
868 break;
869 }
870 if (intel_dp->has_audio)
871 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
872
873 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
874 intel_dp->link_configuration[0] = intel_dp->link_bw;
875 intel_dp->link_configuration[1] = intel_dp->lane_count;
876
877 /*
878 * Check for DPCD version > 1.1 and enhanced framing support
879 */
880 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
881 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
882 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
883 intel_dp->DP |= DP_ENHANCED_FRAMING;
884 }
885
886 /* CPT DP's pipe select is decided in TRANS_DP_CTL */
887 if (intel_crtc->pipe == 1)
888 intel_dp->DP |= DP_PIPEB_SELECT;
889
890 REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
891 DRM_DEBUG_KMS("DP expected reg is %x\n", intel_dp->DP);
892 if (is_edp(intel_encoder)) {
893 uint32_t pfit_control;
894 cdv_intel_edp_panel_on(intel_encoder);
895
896 if (mode->hdisplay != adjusted_mode->hdisplay ||
897 mode->vdisplay != adjusted_mode->vdisplay)
898 pfit_control = PFIT_ENABLE;
899 else
900 pfit_control = 0;
901
902 pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
903
904 REG_WRITE(PFIT_CONTROL, pfit_control);
905 }
906}
907
908
909/* If the sink supports it, try to set the power state appropriately */
910static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
911{
912 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
913 int ret, i;
914
915 /* Should have a valid DPCD by this point */
916 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
917 return;
918
919 if (mode != DRM_MODE_DPMS_ON) {
920 ret = cdv_intel_dp_aux_native_write_1(encoder, DP_SET_POWER,
921 DP_SET_POWER_D3);
922 if (ret != 1)
923 DRM_DEBUG_DRIVER("failed to write sink power state\n");
924 } else {
925 /*
 926 * When turning on, retry the write a few times, 1ms apart,
 927 * to give the sink time to wake up.
928 */
929 for (i = 0; i < 3; i++) {
930 ret = cdv_intel_dp_aux_native_write_1(encoder,
931 DP_SET_POWER,
932 DP_SET_POWER_D0);
933 if (ret == 1)
934 break;
935 udelay(1000);
936 }
937 }
938}
939
940static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
941{
942 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
943 int edp = is_edp(intel_encoder);
944
945 if (edp) {
946 cdv_intel_edp_backlight_off(intel_encoder);
947 cdv_intel_edp_panel_off(intel_encoder);
948 cdv_intel_edp_panel_vdd_on(intel_encoder);
949 }
950 /* Wake up the sink first */
951 cdv_intel_dp_sink_dpms(intel_encoder, DRM_MODE_DPMS_ON);
952 cdv_intel_dp_link_down(intel_encoder);
953 if (edp)
954 cdv_intel_edp_panel_vdd_off(intel_encoder);
955}
956
957static void cdv_intel_dp_commit(struct drm_encoder *encoder)
958{
959 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
960 int edp = is_edp(intel_encoder);
961
962 if (edp)
963 cdv_intel_edp_panel_on(intel_encoder);
964 cdv_intel_dp_start_link_train(intel_encoder);
965 cdv_intel_dp_complete_link_train(intel_encoder);
966 if (edp)
967 cdv_intel_edp_backlight_on(intel_encoder);
968}
969
970static void
971cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
972{
973 struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
974 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
975 struct drm_device *dev = encoder->dev;
976 uint32_t dp_reg = REG_READ(intel_dp->output_reg);
977 int edp = is_edp(intel_encoder);
978
979 if (mode != DRM_MODE_DPMS_ON) {
980 if (edp) {
981 cdv_intel_edp_backlight_off(intel_encoder);
982 cdv_intel_edp_panel_vdd_on(intel_encoder);
983 }
984 cdv_intel_dp_sink_dpms(intel_encoder, mode);
985 cdv_intel_dp_link_down(intel_encoder);
986 if (edp) {
987 cdv_intel_edp_panel_vdd_off(intel_encoder);
988 cdv_intel_edp_panel_off(intel_encoder);
989 }
990 } else {
991 if (edp)
992 cdv_intel_edp_panel_on(intel_encoder);
993 cdv_intel_dp_sink_dpms(intel_encoder, mode);
994 if (!(dp_reg & DP_PORT_EN)) {
995 cdv_intel_dp_start_link_train(intel_encoder);
996 cdv_intel_dp_complete_link_train(intel_encoder);
997 }
998 if (edp)
999 cdv_intel_edp_backlight_on(intel_encoder);
1000 }
1001}
1002
1003/*
1004 * Native read with retry for link status and receiver capability reads for
1005 * cases where the sink may still be asleep.
1006 */
1007static bool
1008cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address,
1009 uint8_t *recv, int recv_bytes)
1010{
1011 int ret, i;
1012
1013 /*
1014 * Sinks are *supposed* to come up within 1ms from an off state,
1015 * but we're also supposed to retry 3 times per the spec.
1016 */
1017 for (i = 0; i < 3; i++) {
1018 ret = cdv_intel_dp_aux_native_read(encoder, address, recv,
1019 recv_bytes);
1020 if (ret == recv_bytes)
1021 return true;
1022 udelay(1000);
1023 }
1024
1025 return false;
1026}
1027
1028/*
1029 * Fetch AUX CH registers 0x202 - 0x207 which contain
1030 * link status information
1031 */
1032static bool
1033cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder)
1034{
1035 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1036 return cdv_intel_dp_aux_native_read_retry(encoder,
1037 DP_LANE0_1_STATUS,
1038 intel_dp->link_status,
1039 DP_LINK_STATUS_SIZE);
1040}
1041
1042static uint8_t
1043cdv_intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1044 int r)
1045{
1046 return link_status[r - DP_LANE0_1_STATUS];
1047}
1048
1049static uint8_t
1050cdv_intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
1051 int lane)
1052{
1053 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
1054 int s = ((lane & 1) ?
1055 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
1056 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
1057 uint8_t l = cdv_intel_dp_link_status(link_status, i);
1058
1059 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
1060}
1061
1062static uint8_t
1063cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
1064 int lane)
1065{
1066 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
1067 int s = ((lane & 1) ?
1068 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
1069 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
1070 uint8_t l = cdv_intel_dp_link_status(link_status, i);
1071
1072 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
1073}
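/*
 * Worked example for the two helpers above (the raw byte is illustrative):
 * ADJUST_REQUEST_LANE0_1 (DPCD 0x206) packs two lanes, two bits of voltage
 * swing and two bits of pre-emphasis each. For a raw byte of 0x26:
 *
 *   lane 0: swing  = (0x26 >> 0) & 3 = 2 -> DP_TRAIN_VOLTAGE_SWING_800
 *           premph = (0x26 >> 2) & 3 = 1 -> DP_TRAIN_PRE_EMPHASIS_3_5
 *   lane 1: swing  = (0x26 >> 4) & 3 = 2 -> DP_TRAIN_VOLTAGE_SWING_800
 *           premph = (0x26 >> 6) & 3 = 0 -> DP_TRAIN_PRE_EMPHASIS_0
 *
 * The results are pre-shifted into the DP_TRAIN_* bit positions so they can
 * be written straight back via DP_TRAINING_LANEx_SET.
 */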
1074
1075
1076#if 0
1077static char *voltage_names[] = {
1078 "0.4V", "0.6V", "0.8V", "1.2V"
1079};
1080static char *pre_emph_names[] = {
1081 "0dB", "3.5dB", "6dB", "9.5dB"
1082};
1083static char *link_train_names[] = {
1084 "pattern 1", "pattern 2", "idle", "off"
1085};
1086#endif
1087
1088#define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
1089/*
1090static uint8_t
1091cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
1092{
1093 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1094 case DP_TRAIN_VOLTAGE_SWING_400:
1095 return DP_TRAIN_PRE_EMPHASIS_6;
1096 case DP_TRAIN_VOLTAGE_SWING_600:
1097 return DP_TRAIN_PRE_EMPHASIS_6;
1098 case DP_TRAIN_VOLTAGE_SWING_800:
1099 return DP_TRAIN_PRE_EMPHASIS_3_5;
1100 case DP_TRAIN_VOLTAGE_SWING_1200:
1101 default:
1102 return DP_TRAIN_PRE_EMPHASIS_0;
1103 }
1104}
1105*/
1106static void
1107cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder)
1108{
1109 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1110 uint8_t v = 0;
1111 uint8_t p = 0;
1112 int lane;
1113
1114 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1115 uint8_t this_v = cdv_intel_get_adjust_request_voltage(intel_dp->link_status, lane);
1116 uint8_t this_p = cdv_intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
1117
1118 if (this_v > v)
1119 v = this_v;
1120 if (this_p > p)
1121 p = this_p;
1122 }
1123
1124 if (v >= CDV_DP_VOLTAGE_MAX)
1125 v = CDV_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
1126
1127 if (p == DP_TRAIN_PRE_EMPHASIS_MASK)
1128 p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
1129
1130 for (lane = 0; lane < 4; lane++)
1131 intel_dp->train_set[lane] = v | p;
1132}
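/*
 * Example of the max-picking above: if lane 0 requests 0.6 V / 3.5 dB and
 * lane 1 requests 0.8 V / 0 dB, the loop keeps the per-field maxima, so all
 * four train_set entries become
 * DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5.
 */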
1133
1134
1135static uint8_t
1136cdv_intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1137 int lane)
1138{
1139 int i = DP_LANE0_1_STATUS + (lane >> 1);
1140 int s = (lane & 1) * 4;
1141 uint8_t l = cdv_intel_dp_link_status(link_status, i);
1142
1143 return (l >> s) & 0xf;
1144}
1145
1146/* Check whether clock recovery is done on all channels */
1147static bool
1148cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
1149{
1150 int lane;
1151 uint8_t lane_status;
1152
1153 for (lane = 0; lane < lane_count; lane++) {
1154 lane_status = cdv_intel_get_lane_status(link_status, lane);
1155 if ((lane_status & DP_LANE_CR_DONE) == 0)
1156 return false;
1157 }
1158 return true;
1159}
1160
1161/* Check to see if channel eq is done on all channels */
1162#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
1163 DP_LANE_CHANNEL_EQ_DONE|\
1164 DP_LANE_SYMBOL_LOCKED)
1165static bool
1166cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
1167{
1168 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1169 uint8_t lane_align;
1170 uint8_t lane_status;
1171 int lane;
1172
1173 lane_align = cdv_intel_dp_link_status(intel_dp->link_status,
1174 DP_LANE_ALIGN_STATUS_UPDATED);
1175 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
1176 return false;
1177 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1178 lane_status = cdv_intel_get_lane_status(intel_dp->link_status, lane);
1179 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
1180 return false;
1181 }
1182 return true;
1183}
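/*
 * Worked example: with two lanes fully trained, the DP_LANE0_1_STATUS byte
 * (DPCD 0x202) reads 0x77 - one nibble per lane, where 0x7 = DP_LANE_CR_DONE |
 * DP_LANE_CHANNEL_EQ_DONE | DP_LANE_SYMBOL_LOCKED, satisfying CHANNEL_EQ_BITS
 * for both lanes. DP_LANE_ALIGN_STATUS_UPDATED (0x204) must additionally
 * report DP_INTERLANE_ALIGN_DONE before this function returns true.
 */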
1184
1185static bool
1186cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
1187 uint32_t dp_reg_value,
1188 uint8_t dp_train_pat)
1189{
1191 struct drm_device *dev = encoder->base.dev;
1192 int ret;
1193 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1194
1195 REG_WRITE(intel_dp->output_reg, dp_reg_value);
1196 REG_READ(intel_dp->output_reg);
1197
1198 ret = cdv_intel_dp_aux_native_write_1(encoder,
1199 DP_TRAINING_PATTERN_SET,
1200 dp_train_pat);
1201
1202 if (ret != 1) {
1203 DRM_DEBUG_KMS("Failure in setting link pattern %x\n",
1204 dp_train_pat);
1205 return false;
1206 }
1207
1208 return true;
1209}
1210
1211
1212static bool
1213cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
1214 uint8_t dp_train_pat)
1215{
1217 int ret;
1218 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1219
1220 ret = cdv_intel_dp_aux_native_write(encoder,
1221 DP_TRAINING_LANE0_SET,
1222 intel_dp->train_set,
1223 intel_dp->lane_count);
1224
1225 if (ret != intel_dp->lane_count) {
1226		DRM_DEBUG_KMS("Failure in setting level %d, lane_cnt = %d\n",
1227 intel_dp->train_set[0], intel_dp->lane_count);
1228 return false;
1229 }
1230 return true;
1231}
1232
1233static void
1234cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level)
1235{
1236 struct drm_device *dev = encoder->base.dev;
1237 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1238 struct ddi_regoff *ddi_reg;
1239 int vswing, premph, index;
1240
1241 if (intel_dp->output_reg == DP_B)
1242 ddi_reg = &ddi_DP_train_table[0];
1243 else
1244 ddi_reg = &ddi_DP_train_table[1];
1245
1246 vswing = (signal_level & DP_TRAIN_VOLTAGE_SWING_MASK);
1247 premph = ((signal_level & DP_TRAIN_PRE_EMPHASIS_MASK)) >>
1248 DP_TRAIN_PRE_EMPHASIS_SHIFT;
1249
1250 if (vswing + premph > 3)
1251 return;
1252#ifdef CDV_FAST_LINK_TRAIN
1253 return;
1254#endif
1255	DRM_DEBUG_KMS("Programming DPIO vswing/pre-emphasis\n");
1257 cdv_sb_reset(dev);
1258 /* ;Swing voltage programming
1259 ;gfx_dpio_set_reg(0xc058, 0x0505313A) */
1260 cdv_sb_write(dev, ddi_reg->VSwing5, 0x0505313A);
1261
1262 /* ;gfx_dpio_set_reg(0x8154, 0x43406055) */
1263 cdv_sb_write(dev, ddi_reg->VSwing1, 0x43406055);
1264
1265 /* ;gfx_dpio_set_reg(0x8148, 0x55338954)
1266 * The VSwing_PreEmph table is also considered based on the vswing/premp
1267 */
1268 index = (vswing + premph) * 2;
1269	if (premph == 1 && vswing == 1)
1270		cdv_sb_write(dev, ddi_reg->VSwing2, 0x55738954);
1271	else
1272		cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]);
1273
1274 /* ;gfx_dpio_set_reg(0x814c, 0x40802040) */
1275 if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_1200)
1276 cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040);
1277 else
1278 cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040);
1279
1280 /* ;gfx_dpio_set_reg(0x8150, 0x2b405555) */
1281 /* cdv_sb_write(dev, ddi_reg->VSwing4, 0x2b405555); */
1282
1283 /* ;gfx_dpio_set_reg(0x8154, 0xc3406055) */
1284 cdv_sb_write(dev, ddi_reg->VSwing1, 0xc3406055);
1285
1286 /* ;Pre emphasis programming
1287 * ;gfx_dpio_set_reg(0xc02c, 0x1f030040)
1288 */
1289 cdv_sb_write(dev, ddi_reg->PreEmph1, 0x1f030040);
1290
1291 /* ;gfx_dpio_set_reg(0x8124, 0x00004000) */
1292 index = 2 * premph + 1;
1293 cdv_sb_write(dev, ddi_reg->PreEmph2, dp_vswing_premph_table[index]);
1294 return;
1295}
1296
1297
1298/* Enable corresponding port and start training pattern 1 */
1299static void
1300cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
1301{
1302 struct drm_device *dev = encoder->base.dev;
1303 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1304 int i;
1305 uint8_t voltage;
1306 bool clock_recovery = false;
1307 int tries;
1308 u32 reg;
1309 uint32_t DP = intel_dp->DP;
1310
1311 DP |= DP_PORT_EN;
1312 DP &= ~DP_LINK_TRAIN_MASK;
1313
1314 reg = DP;
1315 reg |= DP_LINK_TRAIN_PAT_1;
1316 /* Enable output, wait for it to become active */
1317 REG_WRITE(intel_dp->output_reg, reg);
1318 REG_READ(intel_dp->output_reg);
1319 psb_intel_wait_for_vblank(dev);
1320
1321 DRM_DEBUG_KMS("Link config\n");
1322 /* Write the link configuration data */
1323 cdv_intel_dp_aux_native_write(encoder, DP_LINK_BW_SET,
1324 intel_dp->link_configuration,
1325 2);
1326
1327 memset(intel_dp->train_set, 0, 4);
1328 voltage = 0;
1329 tries = 0;
1330 clock_recovery = false;
1331
1332 DRM_DEBUG_KMS("Start train\n");
1333 reg = DP | DP_LINK_TRAIN_PAT_1;
1334
1335
1336 for (;;) {
1337 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1338 DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
1339 intel_dp->train_set[0],
1340 intel_dp->link_configuration[0],
1341 intel_dp->link_configuration[1]);
1342
1343 if (!cdv_intel_dp_set_link_train(encoder, reg, DP_TRAINING_PATTERN_1)) {
1344 DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 1\n");
1345 }
1346 cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
1347 /* Set training pattern 1 */
1348
1349 cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_1);
1350
1351 udelay(200);
1352 if (!cdv_intel_dp_get_link_status(encoder))
1353 break;
1354
1355 DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
1356 intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
1357 intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
1358
1359 if (cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
1360 DRM_DEBUG_KMS("PT1 train is done\n");
1361 clock_recovery = true;
1362 break;
1363 }
1364
1365 /* Check to see if we've tried the max voltage */
1366 for (i = 0; i < intel_dp->lane_count; i++)
1367 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1368 break;
1369 if (i == intel_dp->lane_count)
1370 break;
1371
1372 /* Check to see if we've tried the same voltage 5 times */
1373 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
1374 ++tries;
1375 if (tries == 5)
1376 break;
1377 } else
1378 tries = 0;
1379 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1380
1381 /* Compute new intel_dp->train_set as requested by target */
1382 cdv_intel_get_adjust_train(encoder);
1383
1384 }
1385
1386 if (!clock_recovery) {
1387		DRM_DEBUG_KMS("failure in DP pattern 1 training, train set %x\n", intel_dp->train_set[0]);
1388 }
1389
1390 intel_dp->DP = DP;
1391}
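/*
 * To summarize, the clock-recovery loop above exits in one of four ways:
 * every lane reports CR_DONE (success), the AUX read of the link status
 * fails, all lanes hit DP_TRAIN_MAX_SWING_REACHED, or the same voltage swing
 * has been retried five times. Only the first case sets clock_recovery; the
 * others fall through to the debug message below the loop.
 */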
1392
1393static void
1394cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
1395{
1396 struct drm_device *dev = encoder->base.dev;
1397 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1398 bool channel_eq = false;
1399 int tries, cr_tries;
1400 u32 reg;
1401 uint32_t DP = intel_dp->DP;
1402
1403 /* channel equalization */
1404 tries = 0;
1405 cr_tries = 0;
1406 channel_eq = false;
1407
1408 DRM_DEBUG_KMS("\n");
1409 reg = DP | DP_LINK_TRAIN_PAT_2;
1410
1411 for (;;) {
1412
1413 DRM_DEBUG_KMS("DP Link Train Set %x, Link_config %x, %x\n",
1414 intel_dp->train_set[0],
1415 intel_dp->link_configuration[0],
1416 intel_dp->link_configuration[1]);
1417 /* channel eq pattern */
1418
1419 if (!cdv_intel_dp_set_link_train(encoder, reg,
1420 DP_TRAINING_PATTERN_2)) {
1421 DRM_DEBUG_KMS("Failure in aux-transfer setting pattern 2\n");
1422 }
1423 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1424
1425 if (cr_tries > 5) {
1426 DRM_ERROR("failed to train DP, aborting\n");
1427 cdv_intel_dp_link_down(encoder);
1428 break;
1429 }
1430
1431 cdv_intel_dp_set_vswing_premph(encoder, intel_dp->train_set[0]);
1432
1433 cdv_intel_dplink_set_level(encoder, DP_TRAINING_PATTERN_2);
1434
1435 udelay(1000);
1436 if (!cdv_intel_dp_get_link_status(encoder))
1437 break;
1438
1439 DRM_DEBUG_KMS("DP Link status %x, %x, %x, %x, %x, %x\n",
1440 intel_dp->link_status[0], intel_dp->link_status[1], intel_dp->link_status[2],
1441 intel_dp->link_status[3], intel_dp->link_status[4], intel_dp->link_status[5]);
1442
1443 /* Make sure clock is still ok */
1444 if (!cdv_intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
1445 cdv_intel_dp_start_link_train(encoder);
1446 cr_tries++;
1447 continue;
1448 }
1449
1450 if (cdv_intel_channel_eq_ok(encoder)) {
1451 DRM_DEBUG_KMS("PT2 train is done\n");
1452 channel_eq = true;
1453 break;
1454 }
1455
1456 /* Try 5 times, then try clock recovery if that fails */
1457 if (tries > 5) {
1458 cdv_intel_dp_link_down(encoder);
1459 cdv_intel_dp_start_link_train(encoder);
1460 tries = 0;
1461 cr_tries++;
1462 continue;
1463 }
1464
1465 /* Compute new intel_dp->train_set as requested by target */
1466 cdv_intel_get_adjust_train(encoder);
1467 ++tries;
1468
1469 }
1470
1471 reg = DP | DP_LINK_TRAIN_OFF;
1472
1473 REG_WRITE(intel_dp->output_reg, reg);
1474 REG_READ(intel_dp->output_reg);
1475 cdv_intel_dp_aux_native_write_1(encoder,
1476 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
1477}
1478
1479static void
1480cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
1481{
1482 struct drm_device *dev = encoder->base.dev;
1483 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1484 uint32_t DP = intel_dp->DP;
1485
1486 if ((REG_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
1487 return;
1488
1489 DRM_DEBUG_KMS("\n");
1490
1491	DP &= ~DP_LINK_TRAIN_MASK;
1492	REG_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
1496 REG_READ(intel_dp->output_reg);
1497
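	/* ~17 ms is roughly one frame at 60 Hz; presumably this keeps the
	 * idle pattern on the wire for a full frame before the port is
	 * disabled. */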
1498 msleep(17);
1499
1500 REG_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
1501 REG_READ(intel_dp->output_reg);
1502}
1503
1504static enum drm_connector_status
1505cdv_dp_detect(struct psb_intel_encoder *encoder)
1506{
1507 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1508 enum drm_connector_status status;
1509
1510 status = connector_status_disconnected;
1511	if (cdv_intel_dp_aux_native_read(encoder, DP_DPCD_REV, intel_dp->dpcd,
1512				sizeof(intel_dp->dpcd)) == sizeof(intel_dp->dpcd)) {
1514 if (intel_dp->dpcd[DP_DPCD_REV] != 0)
1515 status = connector_status_connected;
1516 }
1517 if (status == connector_status_connected)
1518 DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
1519 intel_dp->dpcd[0], intel_dp->dpcd[1],
1520 intel_dp->dpcd[2], intel_dp->dpcd[3]);
1521 return status;
1522}
1523
1524/**
1525 * Reads the DPCD over the AUX channel to detect a DP connection.
1526 *
1527 * \return connector_status_connected if a sink responds on the DP port.
1528 * \return connector_status_disconnected otherwise.
1529 */
1530static enum drm_connector_status
1531cdv_intel_dp_detect(struct drm_connector *connector, bool force)
1532{
1533 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
1534 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1535 enum drm_connector_status status;
1536 struct edid *edid = NULL;
1537 int edp = is_edp(encoder);
1538
1539 intel_dp->has_audio = false;
1540
1541 if (edp)
1542 cdv_intel_edp_panel_vdd_on(encoder);
1543 status = cdv_dp_detect(encoder);
1544 if (status != connector_status_connected) {
1545 if (edp)
1546 cdv_intel_edp_panel_vdd_off(encoder);
1547 return status;
1548 }
1549
1550 if (intel_dp->force_audio) {
1551 intel_dp->has_audio = intel_dp->force_audio > 0;
1552 } else {
1553 edid = drm_get_edid(connector, &intel_dp->adapter);
1554 if (edid) {
1555 intel_dp->has_audio = drm_detect_monitor_audio(edid);
1556 kfree(edid);
1557 }
1558 }
1559 if (edp)
1560 cdv_intel_edp_panel_vdd_off(encoder);
1561
1562 return connector_status_connected;
1563}
1564
1565static int cdv_intel_dp_get_modes(struct drm_connector *connector)
1566{
1567 struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector);
1568 struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
1569 struct edid *edid = NULL;
1570 int ret = 0;
1571 int edp = is_edp(intel_encoder);
1572
1573
1574 edid = drm_get_edid(connector, &intel_dp->adapter);
1575 if (edid) {
1576 drm_mode_connector_update_edid_property(connector, edid);
1577 ret = drm_add_edid_modes(connector, edid);
1578 kfree(edid);
1579 }
1580
1581	if (edp) {
1582 struct drm_device *dev = connector->dev;
1583 struct drm_psb_private *dev_priv = dev->dev_private;
1584
1585 cdv_intel_edp_panel_vdd_off(intel_encoder);
1586 if (ret) {
1587			if (!intel_dp->panel_fixed_mode) {
1588 struct drm_display_mode *newmode;
1589 list_for_each_entry(newmode, &connector->probed_modes,
1590 head) {
1591 if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
1592 intel_dp->panel_fixed_mode =
1593 drm_mode_duplicate(dev, newmode);
1594 break;
1595 }
1596 }
1597 }
1598
1599 return ret;
1600 }
1601 if (!intel_dp->panel_fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
1602 intel_dp->panel_fixed_mode =
1603 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
1604 if (intel_dp->panel_fixed_mode) {
1605 intel_dp->panel_fixed_mode->type |=
1606 DRM_MODE_TYPE_PREFERRED;
1607 }
1608 }
1609 if (intel_dp->panel_fixed_mode != NULL) {
1610 struct drm_display_mode *mode;
1611 mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
1612 drm_mode_probed_add(connector, mode);
1613 return 1;
1614 }
1615 }
1616
1617 return ret;
1618}
1619
1620static bool
1621cdv_intel_dp_detect_audio(struct drm_connector *connector)
1622{
1623 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
1624 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1625 struct edid *edid;
1626 bool has_audio = false;
1627 int edp = is_edp(encoder);
1628
1629 if (edp)
1630 cdv_intel_edp_panel_vdd_on(encoder);
1631
1632 edid = drm_get_edid(connector, &intel_dp->adapter);
1633 if (edid) {
1634 has_audio = drm_detect_monitor_audio(edid);
1635 kfree(edid);
1636 }
1637 if (edp)
1638 cdv_intel_edp_panel_vdd_off(encoder);
1639
1640 return has_audio;
1641}
1642
1643static int
1644cdv_intel_dp_set_property(struct drm_connector *connector,
1645 struct drm_property *property,
1646 uint64_t val)
1647{
1648 struct drm_psb_private *dev_priv = connector->dev->dev_private;
1649 struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
1650 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1651 int ret;
1652
1653 ret = drm_connector_property_set_value(connector, property, val);
1654 if (ret)
1655 return ret;
1656
1657 if (property == dev_priv->force_audio_property) {
1658 int i = val;
1659 bool has_audio;
1660
1661 if (i == intel_dp->force_audio)
1662 return 0;
1663
1664 intel_dp->force_audio = i;
1665
1666 if (i == 0)
1667 has_audio = cdv_intel_dp_detect_audio(connector);
1668 else
1669 has_audio = i > 0;
1670
1671 if (has_audio == intel_dp->has_audio)
1672 return 0;
1673
1674 intel_dp->has_audio = has_audio;
1675 goto done;
1676 }
1677
1678 if (property == dev_priv->broadcast_rgb_property) {
1679 if (val == !!intel_dp->color_range)
1680 return 0;
1681
1682 intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
1683 goto done;
1684 }
1685
1686 return -EINVAL;
1687
1688done:
1689 if (encoder->base.crtc) {
1690 struct drm_crtc *crtc = encoder->base.crtc;
1691 drm_crtc_helper_set_mode(crtc, &crtc->mode,
1692 crtc->x, crtc->y,
1693 crtc->fb);
1694 }
1695
1696 return 0;
1697}
1698
1699static void
1700cdv_intel_dp_destroy(struct drm_connector *connector)
1701{
1702 struct psb_intel_encoder *psb_intel_encoder =
1703 psb_intel_attached_encoder(connector);
1704 struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv;
1705
1706 if (is_edp(psb_intel_encoder)) {
1707 /* cdv_intel_panel_destroy_backlight(connector->dev); */
1708 if (intel_dp->panel_fixed_mode) {
1709 kfree(intel_dp->panel_fixed_mode);
1710 intel_dp->panel_fixed_mode = NULL;
1711 }
1712 }
1713 i2c_del_adapter(&intel_dp->adapter);
1714 drm_sysfs_connector_remove(connector);
1715 drm_connector_cleanup(connector);
1716 kfree(connector);
1717}
1718
1719static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder)
1720{
1721 drm_encoder_cleanup(encoder);
1722}
1723
1724static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
1725 .dpms = cdv_intel_dp_dpms,
1726 .mode_fixup = cdv_intel_dp_mode_fixup,
1727 .prepare = cdv_intel_dp_prepare,
1728 .mode_set = cdv_intel_dp_mode_set,
1729 .commit = cdv_intel_dp_commit,
1730};
1731
1732static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
1733 .dpms = drm_helper_connector_dpms,
1734 .detect = cdv_intel_dp_detect,
1735 .fill_modes = drm_helper_probe_single_connector_modes,
1736 .set_property = cdv_intel_dp_set_property,
1737 .destroy = cdv_intel_dp_destroy,
1738};
1739
1740static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
1741 .get_modes = cdv_intel_dp_get_modes,
1742 .mode_valid = cdv_intel_dp_mode_valid,
1743 .best_encoder = psb_intel_best_encoder,
1744};
1745
1746static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
1747 .destroy = cdv_intel_dp_encoder_destroy,
1748};
1749
1750
1751static void cdv_intel_dp_add_properties(struct drm_connector *connector)
1752{
1753 cdv_intel_attach_force_audio_property(connector);
1754 cdv_intel_attach_broadcast_rgb_property(connector);
1755}
1756
1757/* check the VBT to see whether the eDP is on DP-D port */
1758static bool cdv_intel_dpc_is_edp(struct drm_device *dev)
1759{
1760 struct drm_psb_private *dev_priv = dev->dev_private;
1761 struct child_device_config *p_child;
1762 int i;
1763
1764 if (!dev_priv->child_dev_num)
1765 return false;
1766
1767 for (i = 0; i < dev_priv->child_dev_num; i++) {
1768 p_child = dev_priv->child_dev + i;
1769
1770 if (p_child->dvo_port == PORT_IDPC &&
1771 p_child->device_type == DEVICE_TYPE_eDP)
1772 return true;
1773 }
1774 return false;
1775}
1776
1777/* Cedarview display clock gating
1778 *
1779 * We need to disable this to get correct behaviour while enabling
1780 * DP/eDP. TODO - investigate whether we can turn it back on again
1781 * after enabling. */
1782static void cdv_disable_intel_clock_gating(struct drm_device *dev)
1783{
1784 u32 reg_value;
1785 reg_value = REG_READ(DSPCLK_GATE_D);
1786
1787 reg_value |= (DPUNIT_PIPEB_GATE_DISABLE |
1788 DPUNIT_PIPEA_GATE_DISABLE |
1789 DPCUNIT_CLOCK_GATE_DISABLE |
1790 DPLSUNIT_CLOCK_GATE_DISABLE |
1791 DPOUNIT_CLOCK_GATE_DISABLE |
1792 DPIOUNIT_CLOCK_GATE_DISABLE);
1793
1794 REG_WRITE(DSPCLK_GATE_D, reg_value);
1795
1796 udelay(500);
1797}
1798
1799void
1800cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
1801{
1802 struct psb_intel_encoder *psb_intel_encoder;
1803 struct psb_intel_connector *psb_intel_connector;
1804 struct drm_connector *connector;
1805 struct drm_encoder *encoder;
1806 struct cdv_intel_dp *intel_dp;
1807 const char *name = NULL;
1808 int type = DRM_MODE_CONNECTOR_DisplayPort;
1809
1810 psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
1811 if (!psb_intel_encoder)
1812 return;
1813 psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
1814 if (!psb_intel_connector)
1815 goto err_connector;
1816 intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
1817 if (!intel_dp)
1818 goto err_priv;
1819
1820 if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
1821 type = DRM_MODE_CONNECTOR_eDP;
1822
1823 connector = &psb_intel_connector->base;
1824 encoder = &psb_intel_encoder->base;
1825
1826 drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
1827 drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
1828
1829 psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder);
1830
1831 if (type == DRM_MODE_CONNECTOR_DisplayPort)
1832 psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
1833 else
1834 psb_intel_encoder->type = INTEL_OUTPUT_EDP;
1835
1836
1837	psb_intel_encoder->dev_priv = intel_dp;
1838 intel_dp->encoder = psb_intel_encoder;
1839 intel_dp->output_reg = output_reg;
1840
1841 drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
1842 drm_connector_helper_add(connector, &cdv_intel_dp_connector_helper_funcs);
1843
1844 connector->polled = DRM_CONNECTOR_POLL_HPD;
1845 connector->interlace_allowed = false;
1846 connector->doublescan_allowed = false;
1847
1848 drm_sysfs_connector_add(connector);
1849
1850 /* Set up the DDC bus. */
1851 switch (output_reg) {
1852 case DP_B:
1853 name = "DPDDC-B";
1854 psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
1855 break;
1856 case DP_C:
1857 name = "DPDDC-C";
1858 psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
1859 break;
1860 }
1861
1862 cdv_disable_intel_clock_gating(dev);
1863
1864 cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name);
1865	/* FIXME: check the i2c init above for failure */
1866 cdv_intel_dp_add_properties(connector);
1867
1868 if (is_edp(psb_intel_encoder)) {
1869 int ret;
1870 struct edp_power_seq cur;
1871 u32 pp_on, pp_off, pp_div;
1872 u32 pwm_ctrl;
1873
1874 pp_on = REG_READ(PP_CONTROL);
1875 pp_on &= ~PANEL_UNLOCK_MASK;
1876 pp_on |= PANEL_UNLOCK_REGS;
1877
1878 REG_WRITE(PP_CONTROL, pp_on);
1879
1880 pwm_ctrl = REG_READ(BLC_PWM_CTL2);
1881 pwm_ctrl |= PWM_PIPE_B;
1882 REG_WRITE(BLC_PWM_CTL2, pwm_ctrl);
1883
1884 pp_on = REG_READ(PP_ON_DELAYS);
1885 pp_off = REG_READ(PP_OFF_DELAYS);
1886 pp_div = REG_READ(PP_DIVISOR);
1887
1888 /* Pull timing values out of registers */
1889 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
1890 PANEL_POWER_UP_DELAY_SHIFT;
1891
1892 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
1893 PANEL_LIGHT_ON_DELAY_SHIFT;
1894
1895 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
1896 PANEL_LIGHT_OFF_DELAY_SHIFT;
1897
1898 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
1899 PANEL_POWER_DOWN_DELAY_SHIFT;
1900
1901 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
1902 PANEL_POWER_CYCLE_DELAY_SHIFT);
1903
1904 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
1905 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
1906
1907
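		/* Worked example (raw values are illustrative): the delay
		 * fields are converted to milliseconds below, so a t1_t3 of
		 * 400 yields a panel_power_up_delay of 400 / 10 = 40 ms, and
		 * a t11_t12 of 6 yields a panel_power_cycle_delay of
		 * (6 - 1) * 100 = 500 ms. */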
1908 intel_dp->panel_power_up_delay = cur.t1_t3 / 10;
1909 intel_dp->backlight_on_delay = cur.t8 / 10;
1910 intel_dp->backlight_off_delay = cur.t9 / 10;
1911 intel_dp->panel_power_down_delay = cur.t10 / 10;
1912 intel_dp->panel_power_cycle_delay = (cur.t11_t12 - 1) * 100;
1913
1914 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
1915 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
1916 intel_dp->panel_power_cycle_delay);
1917
1918 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
1919 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
1920
1921
1922 cdv_intel_edp_panel_vdd_on(psb_intel_encoder);
1923 ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV,
1924 intel_dp->dpcd,
1925 sizeof(intel_dp->dpcd));
1926 cdv_intel_edp_panel_vdd_off(psb_intel_encoder);
1927 if (ret == 0) {
1928 /* if this fails, presume the device is a ghost */
1929 DRM_INFO("failed to retrieve link info, disabling eDP\n");
1930 cdv_intel_dp_encoder_destroy(encoder);
1931 cdv_intel_dp_destroy(connector);
1932 goto err_priv;
1933 } else {
1934 DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
1935 intel_dp->dpcd[0], intel_dp->dpcd[1],
1936 intel_dp->dpcd[2], intel_dp->dpcd[3]);
1937
1938 }
1939	/* The CDV reference driver moves panel backlight setup into the displays
1940	 * that have a backlight: a good idea we should probably adopt, but we
1941	 * need to migrate all the drivers before we can do that. */
1942	/* cdv_intel_panel_setup_backlight(dev); */
1943 }
1944 return;
1945
1946err_priv:
1947 kfree(psb_intel_connector);
1948err_connector:
1949 kfree(psb_intel_encoder);
1950}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index a86f87b9ddde..7272a461edfe 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -139,8 +139,6 @@ static enum drm_connector_status cdv_hdmi_detect(
139{ 139{
140 struct psb_intel_encoder *psb_intel_encoder = 140 struct psb_intel_encoder *psb_intel_encoder =
141 psb_intel_attached_encoder(connector); 141 psb_intel_attached_encoder(connector);
142 struct psb_intel_connector *psb_intel_connector =
143 to_psb_intel_connector(connector);
144 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv; 142 struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
145 struct edid *edid = NULL; 143 struct edid *edid = NULL;
146 enum drm_connector_status status = connector_status_disconnected; 144 enum drm_connector_status status = connector_status_disconnected;
@@ -157,8 +155,6 @@ static enum drm_connector_status cdv_hdmi_detect(
157 hdmi_priv->has_hdmi_audio = 155 hdmi_priv->has_hdmi_audio =
158 drm_detect_monitor_audio(edid); 156 drm_detect_monitor_audio(edid);
159 } 157 }
160
161 psb_intel_connector->base.display_info.raw_edid = NULL;
162 kfree(edid); 158 kfree(edid);
163 } 159 }
164 return status; 160 return status;
@@ -352,9 +348,11 @@ void cdv_hdmi_init(struct drm_device *dev,
352 switch (reg) { 348 switch (reg) {
353 case SDVOB: 349 case SDVOB:
354 ddc_bus = GPIOE; 350 ddc_bus = GPIOE;
351 psb_intel_encoder->ddi_select = DDI0_SELECT;
355 break; 352 break;
356 case SDVOC: 353 case SDVOC:
357 ddc_bus = GPIOD; 354 ddc_bus = GPIOD;
355 psb_intel_encoder->ddi_select = DDI1_SELECT;
358 break; 356 break;
359 default: 357 default:
360 DRM_ERROR("unknown reg 0x%x for HDMI\n", reg); 358 DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index c7f9468b74ba..b362dd39bf5a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -506,16 +506,8 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
506 property, 506 property,
507 value)) 507 value))
508 return -1; 508 return -1;
509 else { 509 else
510#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 510 gma_backlight_set(encoder->dev, value);
511 struct drm_psb_private *dev_priv =
512 encoder->dev->dev_private;
513 struct backlight_device *bd =
514 dev_priv->backlight_device;
515 bd->props.brightness = value;
516 backlight_update_status(bd);
517#endif
518 }
519 } else if (!strcmp(property->name, "DPMS") && encoder) { 511 } else if (!strcmp(property->name, "DPMS") && encoder) {
520 struct drm_encoder_helper_funcs *helpers = 512 struct drm_encoder_helper_funcs *helpers =
521 encoder->helper_private; 513 encoder->helper_private;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 5732b5702e1c..884ba73ac6ce 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -764,6 +764,13 @@ static void psb_setup_outputs(struct drm_device *dev)
764 crtc_mask = dev_priv->ops->hdmi_mask; 764 crtc_mask = dev_priv->ops->hdmi_mask;
765 clone_mask = (1 << INTEL_OUTPUT_HDMI); 765 clone_mask = (1 << INTEL_OUTPUT_HDMI);
766 break; 766 break;
767 case INTEL_OUTPUT_DISPLAYPORT:
768 crtc_mask = (1 << 0) | (1 << 1);
769 clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
770 break;
771 case INTEL_OUTPUT_EDP:
772 crtc_mask = (1 << 1);
773 clone_mask = (1 << INTEL_OUTPUT_EDP);
767 } 774 }
768 encoder->possible_crtcs = crtc_mask; 775 encoder->possible_crtcs = crtc_mask;
769 encoder->possible_clones = 776 encoder->possible_clones =
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index f3a1ae8eb77b..eefd6cc5b80d 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -36,7 +36,12 @@ int psb_gem_init_object(struct drm_gem_object *obj)
36void psb_gem_free_object(struct drm_gem_object *obj) 36void psb_gem_free_object(struct drm_gem_object *obj)
37{ 37{
38 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem); 38 struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
39 drm_gem_object_release_wrap(obj); 39
40 /* Remove the list map if one is present */
41 if (obj->map_list.map)
42 drm_gem_free_mmap_offset(obj);
43 drm_gem_object_release(obj);
44
40 /* This must occur last as it frees up the memory of the GEM object */ 45 /* This must occur last as it frees up the memory of the GEM object */
41 psb_gtt_free_range(obj->dev, gtt); 46 psb_gtt_free_range(obj->dev, gtt);
42} 47}
@@ -77,7 +82,7 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
77 82
78 /* Make it mmapable */ 83 /* Make it mmapable */
79 if (!obj->map_list.map) { 84 if (!obj->map_list.map) {
80 ret = gem_create_mmap_offset(obj); 85 ret = drm_gem_create_mmap_offset(obj);
81 if (ret) 86 if (ret)
82 goto out; 87 goto out;
83 } 88 }
diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c
deleted file mode 100644
index 3c17634f6061..000000000000
--- a/drivers/gpu/drm/gma500/gem_glue.c
+++ /dev/null
@@ -1,90 +0,0 @@
1/**************************************************************************
2 * Copyright (c) 2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20#include <drm/drmP.h>
21#include <drm/drm.h>
22#include "gem_glue.h"
23
24void drm_gem_object_release_wrap(struct drm_gem_object *obj)
25{
26 /* Remove the list map if one is present */
27 if (obj->map_list.map) {
28 struct drm_gem_mm *mm = obj->dev->mm_private;
29 struct drm_map_list *list = &obj->map_list;
30 drm_ht_remove_item(&mm->offset_hash, &list->hash);
31 drm_mm_put_block(list->file_offset_node);
32 kfree(list->map);
33 list->map = NULL;
34 }
35 drm_gem_object_release(obj);
36}
37
38/**
39 * gem_create_mmap_offset - invent an mmap offset
40 * @obj: our object
41 *
42 * Standard implementation of offset generation for mmap as is
43 * duplicated in several drivers. This belongs in GEM.
44 */
45int gem_create_mmap_offset(struct drm_gem_object *obj)
46{
47 struct drm_device *dev = obj->dev;
48 struct drm_gem_mm *mm = dev->mm_private;
49 struct drm_map_list *list;
50 struct drm_local_map *map;
51 int ret;
52
53 list = &obj->map_list;
54 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
55 if (list->map == NULL)
56 return -ENOMEM;
57 map = list->map;
58 map->type = _DRM_GEM;
59 map->size = obj->size;
60 map->handle = obj;
61
62 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
63 obj->size / PAGE_SIZE, 0, 0);
64 if (!list->file_offset_node) {
65 dev_err(dev->dev, "failed to allocate offset for bo %d\n",
66 obj->name);
67 ret = -ENOSPC;
68 goto free_it;
69 }
70 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
71 obj->size / PAGE_SIZE, 0);
72 if (!list->file_offset_node) {
73 ret = -ENOMEM;
74 goto free_it;
75 }
76 list->hash.key = list->file_offset_node->start;
77 ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
78 if (ret) {
79 dev_err(dev->dev, "failed to add to map hash\n");
80 goto free_mm;
81 }
82 return 0;
83
84free_mm:
85 drm_mm_put_block(list->file_offset_node);
86free_it:
87 kfree(list->map);
88 list->map = NULL;
89 return ret;
90}
diff --git a/drivers/gpu/drm/gma500/gem_glue.h b/drivers/gpu/drm/gma500/gem_glue.h
deleted file mode 100644
index ce5ce30f74db..000000000000
--- a/drivers/gpu/drm/gma500/gem_glue.h
+++ /dev/null
@@ -1,2 +0,0 @@
1extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
2extern int gem_create_mmap_offset(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index a837ee97787c..403fffb03abd 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -54,6 +54,98 @@ static void *find_section(struct bdb_header *bdb, int section_id)
54 return NULL; 54 return NULL;
55} 55}
56 56
57static void
58parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
59{
60 struct bdb_edp *edp;
61 struct edp_power_seq *edp_pps;
62 struct edp_link_params *edp_link_params;
63 uint8_t panel_type;
64
65 edp = find_section(bdb, BDB_EDP);
66
67 dev_priv->edp.bpp = 18;
68 if (!edp) {
69 if (dev_priv->edp.support) {
70 DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported, assume %dbpp panel color depth.\n",
71 dev_priv->edp.bpp);
72 }
73 return;
74 }
75
76 panel_type = dev_priv->panel_type;
77 switch ((edp->color_depth >> (panel_type * 2)) & 3) {
78 case EDP_18BPP:
79 dev_priv->edp.bpp = 18;
80 break;
81 case EDP_24BPP:
82 dev_priv->edp.bpp = 24;
83 break;
84 case EDP_30BPP:
85 dev_priv->edp.bpp = 30;
86 break;
87 }
88
89 /* Get the eDP sequencing and link info */
90 edp_pps = &edp->power_seqs[panel_type];
91 edp_link_params = &edp->link_params[panel_type];
92
93 dev_priv->edp.pps = *edp_pps;
94
95 DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
96 dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8,
97 dev_priv->edp.pps.t9, dev_priv->edp.pps.t10,
98 dev_priv->edp.pps.t11_t12);
99
100 dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
101 DP_LINK_BW_1_62;
102 switch (edp_link_params->lanes) {
103 case 0:
104 dev_priv->edp.lanes = 1;
105 break;
106 case 1:
107 dev_priv->edp.lanes = 2;
108 break;
109 case 3:
110 default:
111 dev_priv->edp.lanes = 4;
112 break;
113 }
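	/* The VBT lane field is an enumeration rather than a raw count:
	 * 0 -> 1 lane, 1 -> 2 lanes, 3 -> 4 lanes (see the EDP_LANE_*
	 * defines in intel_bios.h), hence the non-obvious switch above. */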
114 DRM_DEBUG_KMS("VBT reports EDP: Lane_count %d, Lane_rate %d, Bpp %d\n",
115 dev_priv->edp.lanes, dev_priv->edp.rate, dev_priv->edp.bpp);
116
117 switch (edp_link_params->preemphasis) {
118 case 0:
119 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
120 break;
121 case 1:
122 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
123 break;
124 case 2:
125 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
126 break;
127 case 3:
128 dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
129 break;
130 }
131 switch (edp_link_params->vswing) {
132 case 0:
133 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
134 break;
135 case 1:
136 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
137 break;
138 case 2:
139 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
140 break;
141 case 3:
142 dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
143 break;
144 }
145 DRM_DEBUG_KMS("VBT reports EDP: VSwing %d, Preemph %d\n",
146 dev_priv->edp.vswing, dev_priv->edp.preemphasis);
147}
148
57static u16 149static u16
58get_blocksize(void *p) 150get_blocksize(void *p)
59{ 151{
@@ -154,6 +246,8 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
154 return; 246 return;
155 247
156 dev_priv->lvds_dither = lvds_options->pixel_dither; 248 dev_priv->lvds_dither = lvds_options->pixel_dither;
249 dev_priv->panel_type = lvds_options->panel_type;
250
157 if (lvds_options->panel_type == 0xff) 251 if (lvds_options->panel_type == 0xff)
158 return; 252 return;
159 253
@@ -340,6 +434,9 @@ parse_driver_features(struct drm_psb_private *dev_priv,
340 if (!driver) 434 if (!driver)
341 return; 435 return;
342 436
437 if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
438 dev_priv->edp.support = 1;
439
343 /* This bit means to use 96Mhz for DPLL_A or not */ 440 /* This bit means to use 96Mhz for DPLL_A or not */
344 if (driver->primary_lfp_id) 441 if (driver->primary_lfp_id)
345 dev_priv->dplla_96mhz = true; 442 dev_priv->dplla_96mhz = true;
@@ -437,6 +534,9 @@ int psb_intel_init_bios(struct drm_device *dev)
437 size_t size; 534 size_t size;
438 int i; 535 int i;
439 536
537
538 dev_priv->panel_type = 0xff;
539
440 /* XXX Should this validation be moved to intel_opregion.c? */ 540 /* XXX Should this validation be moved to intel_opregion.c? */
441 if (dev_priv->opregion.vbt) { 541 if (dev_priv->opregion.vbt) {
442 struct vbt_header *vbt = dev_priv->opregion.vbt; 542 struct vbt_header *vbt = dev_priv->opregion.vbt;
@@ -477,6 +577,7 @@ int psb_intel_init_bios(struct drm_device *dev)
477 parse_sdvo_device_mapping(dev_priv, bdb); 577 parse_sdvo_device_mapping(dev_priv, bdb);
478 parse_device_mapping(dev_priv, bdb); 578 parse_device_mapping(dev_priv, bdb);
479 parse_backlight_data(dev_priv, bdb); 579 parse_backlight_data(dev_priv, bdb);
580 parse_edp(dev_priv, bdb);
480 581
481 if (bios) 582 if (bios)
482 pci_unmap_rom(pdev, bios); 583 pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 2e95523b84b1..c6267c98c9e7 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -23,6 +23,7 @@
23#define _I830_BIOS_H_ 23#define _I830_BIOS_H_
24 24
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include <drm/drm_dp_helper.h>
26 27
27struct vbt_header { 28struct vbt_header {
28 u8 signature[20]; /**< Always starts with 'VBT$' */ 29 u8 signature[20]; /**< Always starts with 'VBT$' */
@@ -93,6 +94,7 @@ struct vbios_data {
93#define BDB_SDVO_LVDS_PNP_IDS 24 94#define BDB_SDVO_LVDS_PNP_IDS 24
94#define BDB_SDVO_LVDS_POWER_SEQ 25 95#define BDB_SDVO_LVDS_POWER_SEQ 25
95#define BDB_TV_OPTIONS 26 96#define BDB_TV_OPTIONS 26
97#define BDB_EDP 27
96#define BDB_LVDS_OPTIONS 40 98#define BDB_LVDS_OPTIONS 40
97#define BDB_LVDS_LFP_DATA_PTRS 41 99#define BDB_LVDS_LFP_DATA_PTRS 41
98#define BDB_LVDS_LFP_DATA 42 100#define BDB_LVDS_LFP_DATA 42
@@ -391,6 +393,11 @@ struct bdb_sdvo_lvds_options {
391 u8 panel_misc_bits_4; 393 u8 panel_misc_bits_4;
392} __attribute__((packed)); 394} __attribute__((packed));
393 395
396#define BDB_DRIVER_FEATURE_NO_LVDS 0
397#define BDB_DRIVER_FEATURE_INT_LVDS 1
398#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
399#define BDB_DRIVER_FEATURE_EDP 3
400
394struct bdb_driver_features { 401struct bdb_driver_features {
395 u8 boot_dev_algorithm:1; 402 u8 boot_dev_algorithm:1;
396 u8 block_display_switch:1; 403 u8 block_display_switch:1;
@@ -431,6 +438,45 @@ struct bdb_driver_features {
431 u8 custom_vbt_version; 438 u8 custom_vbt_version;
432} __attribute__((packed)); 439} __attribute__((packed));
433 440
441#define EDP_18BPP 0
442#define EDP_24BPP 1
443#define EDP_30BPP 2
444#define EDP_RATE_1_62 0
445#define EDP_RATE_2_7 1
446#define EDP_LANE_1 0
447#define EDP_LANE_2 1
448#define EDP_LANE_4 3
449#define EDP_PREEMPHASIS_NONE 0
450#define EDP_PREEMPHASIS_3_5dB 1
451#define EDP_PREEMPHASIS_6dB 2
452#define EDP_PREEMPHASIS_9_5dB 3
453#define EDP_VSWING_0_4V 0
454#define EDP_VSWING_0_6V 1
455#define EDP_VSWING_0_8V 2
456#define EDP_VSWING_1_2V 3
457
458struct edp_power_seq {
459 u16 t1_t3;
460 u16 t8;
461 u16 t9;
462 u16 t10;
463 u16 t11_t12;
464} __attribute__ ((packed));
465
466struct edp_link_params {
467 u8 rate:4;
468 u8 lanes:4;
469 u8 preemphasis:4;
470 u8 vswing:4;
471} __attribute__ ((packed));
472
473struct bdb_edp {
474 struct edp_power_seq power_seqs[16];
475 u32 color_depth;
476 u32 sdrrs_msa_timing_delay;
477 struct edp_link_params link_params[16];
478} __attribute__ ((packed));
479
434extern int psb_intel_init_bios(struct drm_device *dev); 480extern int psb_intel_init_bios(struct drm_device *dev);
435extern void psb_intel_destroy_bios(struct drm_device *dev); 481extern void psb_intel_destroy_bios(struct drm_device *dev);
436 482
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 5675d93b4205..32dba2ab53e1 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -299,17 +299,8 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
299 if (drm_connector_property_set_value(connector, property, 299 if (drm_connector_property_set_value(connector, property,
300 value)) 300 value))
301 goto set_prop_error; 301 goto set_prop_error;
302 else { 302 else
303#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 303 gma_backlight_set(encoder->dev, value);
304 struct backlight_device *psb_bd;
305
306 psb_bd = mdfld_get_backlight_device();
307 if (psb_bd) {
308 psb_bd->props.brightness = value;
309 mdfld_set_brightness(psb_bd);
310 }
311#endif
312 }
313 } 304 }
314set_prop_done: 305set_prop_done:
315 return 0; 306 return 0;
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 64d18a37da40..a97e38e284fa 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -118,20 +118,20 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
118 dev_priv->platform_rev_id); 118 dev_priv->platform_rev_id);
119} 119}
120 120
121struct vbt_header { 121struct mid_vbt_header {
122 u32 signature; 122 u32 signature;
123 u8 revision; 123 u8 revision;
124} __packed; 124} __packed;
125 125
126/* The same for r0 and r1 */ 126/* The same for r0 and r1 */
127struct vbt_r0 { 127struct vbt_r0 {
128 struct vbt_header vbt_header; 128 struct mid_vbt_header vbt_header;
129 u8 size; 129 u8 size;
130 u8 checksum; 130 u8 checksum;
131} __packed; 131} __packed;
132 132
133struct vbt_r10 { 133struct vbt_r10 {
134 struct vbt_header vbt_header; 134 struct mid_vbt_header vbt_header;
135 u8 checksum; 135 u8 checksum;
136 u16 size; 136 u16 size;
137 u8 panel_count; 137 u8 panel_count;
@@ -281,7 +281,7 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
281 struct drm_device *dev = dev_priv->dev; 281 struct drm_device *dev = dev_priv->dev;
282 u32 addr; 282 u32 addr;
283 u8 __iomem *vbt_virtual; 283 u8 __iomem *vbt_virtual;
284 struct vbt_header vbt_header; 284 struct mid_vbt_header vbt_header;
285 struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0)); 285 struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
286 int ret = -1; 286 int ret = -1;
287 287
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 2eb3dc4e9c9b..69e51e903f35 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -252,7 +252,6 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
252 if (edid) { 252 if (edid) {
253 drm_mode_connector_update_edid_property(connector, edid); 253 drm_mode_connector_update_edid_property(connector, edid);
254 ret = drm_add_edid_modes(connector, edid); 254 ret = drm_add_edid_modes(connector, edid);
255 connector->display_info.raw_edid = NULL;
256 } 255 }
257 256
258 /* 257 /*
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index c430bd424681..ad0d6de938f3 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -166,8 +166,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
166 166
167 if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) { 167 if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
168 int max = bd->props.max_brightness; 168 int max = bd->props.max_brightness;
169 bd->props.brightness = bclp * max / 255; 169 gma_backlight_set(dev, bclp * max / 255);
170 backlight_update_status(bd);
171 } 170 }
172 171
173 asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID; 172 asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 7563cd51851a..b58c4701c4e8 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -290,6 +290,7 @@ static void psb_get_core_freq(struct drm_device *dev)
290 case 6: 290 case 6:
291 case 7: 291 case 7:
292 dev_priv->core_freq = 266; 292 dev_priv->core_freq = 266;
293 break;
293 default: 294 default:
294 dev_priv->core_freq = 0; 295 dev_priv->core_freq = 0;
295 } 296 }
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index b15282fdbf97..a7fd6c48b793 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -24,10 +24,10 @@
24 24
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include <drm/drm_global.h> 26#include <drm/drm_global.h>
27#include "gem_glue.h"
28#include <drm/gma_drm.h> 27#include <drm/gma_drm.h>
29#include "psb_reg.h" 28#include "psb_reg.h"
30#include "psb_intel_drv.h" 29#include "psb_intel_drv.h"
30#include "intel_bios.h"
31#include "gtt.h" 31#include "gtt.h"
32#include "power.h" 32#include "power.h"
33#include "opregion.h" 33#include "opregion.h"
@@ -613,6 +613,8 @@ struct drm_psb_private {
613 */ 613 */
614 struct backlight_device *backlight_device; 614 struct backlight_device *backlight_device;
615 struct drm_property *backlight_property; 615 struct drm_property *backlight_property;
616 bool backlight_enabled;
617 int backlight_level;
616 uint32_t blc_adj1; 618 uint32_t blc_adj1;
617 uint32_t blc_adj2; 619 uint32_t blc_adj2;
618 620
@@ -640,6 +642,19 @@ struct drm_psb_private {
640 int mdfld_panel_id; 642 int mdfld_panel_id;
641 643
642 bool dplla_96mhz; /* DPLL data from the VBT */ 644 bool dplla_96mhz; /* DPLL data from the VBT */
645
646 struct {
647 int rate;
648 int lanes;
649 int preemphasis;
650 int vswing;
651
652 bool initialized;
653 bool support;
654 int bpp;
655 struct edp_power_seq pps;
656 } edp;
657 uint8_t panel_type;
643}; 658};
644 659
645 660
@@ -796,6 +811,9 @@ extern int psb_fbdev_init(struct drm_device *dev);
796/* backlight.c */ 811/* backlight.c */
797int gma_backlight_init(struct drm_device *dev); 812int gma_backlight_init(struct drm_device *dev);
798void gma_backlight_exit(struct drm_device *dev); 813void gma_backlight_exit(struct drm_device *dev);
814void gma_backlight_disable(struct drm_device *dev);
815void gma_backlight_enable(struct drm_device *dev);
816void gma_backlight_set(struct drm_device *dev, int v);
799 817
800/* oaktrail_crtc.c */ 818/* oaktrail_crtc.c */
801extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs; 819extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index ebe1a28f60e1..90f2d11e686b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -29,10 +29,6 @@
29 * Display related stuff 29 * Display related stuff
30 */ 30 */
31 31
32/* store information about an Ixxx DVO */
33/* The i830->i865 use multiple DVOs with multiple i2cs */
34/* the i915, i945 have a single sDVO i2c bus - which is different */
35#define MAX_OUTPUTS 6
36/* maximum connectors per crtcs in the mode set */ 32/* maximum connectors per crtcs in the mode set */
37#define INTELFB_CONN_LIMIT 4 33#define INTELFB_CONN_LIMIT 4
38 34
@@ -69,6 +65,8 @@
69#define INTEL_OUTPUT_HDMI 6 65#define INTEL_OUTPUT_HDMI 6
70#define INTEL_OUTPUT_MIPI 7 66#define INTEL_OUTPUT_MIPI 7
71#define INTEL_OUTPUT_MIPI2 8 67#define INTEL_OUTPUT_MIPI2 8
68#define INTEL_OUTPUT_DISPLAYPORT 9
69#define INTEL_OUTPUT_EDP 10
72 70
73#define INTEL_DVO_CHIP_NONE 0 71#define INTEL_DVO_CHIP_NONE 0
74#define INTEL_DVO_CHIP_LVDS 1 72#define INTEL_DVO_CHIP_LVDS 1
@@ -133,6 +131,11 @@ struct psb_intel_encoder {
133 void (*hot_plug)(struct psb_intel_encoder *); 131 void (*hot_plug)(struct psb_intel_encoder *);
134 int crtc_mask; 132 int crtc_mask;
135 int clone_mask; 133 int clone_mask;
134 u32 ddi_select; /* Channel info */
135#define DDI0_SELECT 0x01
136#define DDI1_SELECT 0x02
137#define DP_MASK 0x8000
138#define DDI_MASK 0x03
136 void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */ 139 void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
137 140
 138 /* FIXME: Either make SDVO and LVDS store its i2c here or give CDV its 141
@@ -190,7 +193,6 @@ struct psb_intel_crtc {
190 u32 mode_flags; 193 u32 mode_flags;
191 194
192 bool active; 195 bool active;
193 bool crtc_enable;
194 196
195 /* Saved Crtc HW states */ 197 /* Saved Crtc HW states */
196 struct psb_intel_crtc_state *crtc_state; 198 struct psb_intel_crtc_state *crtc_state;
@@ -285,4 +287,20 @@ extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
285extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 287extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
286extern void gma_intel_teardown_gmbus(struct drm_device *dev); 288extern void gma_intel_teardown_gmbus(struct drm_device *dev);
287 289
290/* DP support */
291extern void cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg);
292extern void cdv_intel_dp_set_m_n(struct drm_crtc *crtc,
293 struct drm_display_mode *mode,
294 struct drm_display_mode *adjusted_mode);
295
296extern void psb_intel_attach_force_audio_property(struct drm_connector *connector);
297extern void psb_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
298
299extern int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val);
300extern int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val);
301extern void cdv_sb_reset(struct drm_device *dev);
302
303extern void cdv_intel_attach_force_audio_property(struct drm_connector *connector);
304extern void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector);
305
288#endif /* __INTEL_DRV_H__ */ 306#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 37adc9edf974..2a4c3a9e33e3 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -630,17 +630,8 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
630 property, 630 property,
631 value)) 631 value))
632 goto set_prop_error; 632 goto set_prop_error;
633 else { 633 else
634#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 634 gma_backlight_set(encoder->dev, value);
635 struct drm_psb_private *devp =
636 encoder->dev->dev_private;
637 struct backlight_device *bd = devp->backlight_device;
638 if (bd) {
639 bd->props.brightness = value;
640 backlight_update_status(bd);
641 }
642#endif
643 }
644 } else if (!strcmp(property->name, "DPMS")) { 635 } else if (!strcmp(property->name, "DPMS")) {
645 struct drm_encoder_helper_funcs *hfuncs 636 struct drm_encoder_helper_funcs *hfuncs
646 = encoder->helper_private; 637 = encoder->helper_private;
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index 8e8c8efb0a89..d914719c4b60 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -173,15 +173,46 @@
173#define PP_SEQUENCE_ON (1 << 28) 173#define PP_SEQUENCE_ON (1 << 28)
174#define PP_SEQUENCE_OFF (2 << 28) 174#define PP_SEQUENCE_OFF (2 << 28)
175#define PP_SEQUENCE_MASK 0x30000000 175#define PP_SEQUENCE_MASK 0x30000000
176#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
177#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
178#define PP_SEQUENCE_STATE_MASK 0x0000000f
179
176#define PP_CONTROL 0x61204 180#define PP_CONTROL 0x61204
177#define POWER_TARGET_ON (1 << 0) 181#define POWER_TARGET_ON (1 << 0)
178 182#define PANEL_UNLOCK_REGS (0xabcd << 16)
183#define PANEL_UNLOCK_MASK (0xffff << 16)
184#define EDP_FORCE_VDD (1 << 3)
185#define EDP_BLC_ENABLE (1 << 2)
186#define PANEL_POWER_RESET (1 << 1)
187#define PANEL_POWER_OFF (0 << 0)
188#define PANEL_POWER_ON (1 << 0)
189
190/* Poulsbo/Oaktrail */
179#define LVDSPP_ON 0x61208 191#define LVDSPP_ON 0x61208
180#define LVDSPP_OFF 0x6120c 192#define LVDSPP_OFF 0x6120c
181#define PP_CYCLE 0x61210 193#define PP_CYCLE 0x61210
182 194
195/* Cedartrail */
183#define PP_ON_DELAYS 0x61208 /* Cedartrail */ 196#define PP_ON_DELAYS 0x61208 /* Cedartrail */
197#define PANEL_PORT_SELECT_MASK (3 << 30)
198#define PANEL_PORT_SELECT_LVDS (0 << 30)
199#define PANEL_PORT_SELECT_EDP (1 << 30)
200#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
201#define PANEL_POWER_UP_DELAY_SHIFT 16
202#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
203#define PANEL_LIGHT_ON_DELAY_SHIFT 0
204
184#define PP_OFF_DELAYS 0x6120c /* Cedartrail */ 205#define PP_OFF_DELAYS 0x6120c /* Cedartrail */
206#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
207#define PANEL_POWER_DOWN_DELAY_SHIFT 16
208#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
209#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
210
211#define PP_DIVISOR 0x61210 /* Cedartrail */
212#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
213#define PP_REFERENCE_DIVIDER_SHIFT 8
214#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
215#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
185 216
186#define PFIT_CONTROL 0x61230 217#define PFIT_CONTROL 0x61230
187#define PFIT_ENABLE (1 << 31) 218#define PFIT_ENABLE (1 << 31)
@@ -1282,6 +1313,10 @@ No status bits are changed.
1282# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* Fixed value on CDV */ 1313# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* Fixed value on CDV */
1283# define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11) 1314# define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11)
1284# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) 1315# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6)
1316# define DPUNIT_PIPEB_GATE_DISABLE (1 << 30)
1317# define DPUNIT_PIPEA_GATE_DISABLE (1 << 25)
1318# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24)
1319# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13)
1285 1320
1286#define RAMCLK_GATE_D 0x6210 1321#define RAMCLK_GATE_D 0x6210
1287 1322
@@ -1347,5 +1382,165 @@ No status bits are changed.
1347#define LANE_PLL_ENABLE (0x3 << 20) 1382#define LANE_PLL_ENABLE (0x3 << 20)
1348#define LANE_PLL_PIPE(p) (((p) == 0) ? (1 << 21) : (0 << 21)) 1383#define LANE_PLL_PIPE(p) (((p) == 0) ? (1 << 21) : (0 << 21))
1349 1384
1385#define DP_B 0x64100
1386#define DP_C 0x64200
1387
1388#define DP_PORT_EN (1 << 31)
1389#define DP_PIPEB_SELECT (1 << 30)
1390#define DP_PIPE_MASK (1 << 30)
1391
1392/* Link training mode - select a suitable mode for each stage */
1393#define DP_LINK_TRAIN_PAT_1 (0 << 28)
1394#define DP_LINK_TRAIN_PAT_2 (1 << 28)
1395#define DP_LINK_TRAIN_PAT_IDLE (2 << 28)
1396#define DP_LINK_TRAIN_OFF (3 << 28)
1397#define DP_LINK_TRAIN_MASK (3 << 28)
1398#define DP_LINK_TRAIN_SHIFT 28
1399
1400/* Signal voltages. These are mostly controlled by the other end */
1401#define DP_VOLTAGE_0_4 (0 << 25)
1402#define DP_VOLTAGE_0_6 (1 << 25)
1403#define DP_VOLTAGE_0_8 (2 << 25)
1404#define DP_VOLTAGE_1_2 (3 << 25)
1405#define DP_VOLTAGE_MASK (7 << 25)
1406#define DP_VOLTAGE_SHIFT 25
1407
 1408/* Signal pre-emphasis levels; as with the voltages, the other end tells
 1409 * us what it wants
1410 */
1411#define DP_PRE_EMPHASIS_0 (0 << 22)
1412#define DP_PRE_EMPHASIS_3_5 (1 << 22)
1413#define DP_PRE_EMPHASIS_6 (2 << 22)
1414#define DP_PRE_EMPHASIS_9_5 (3 << 22)
1415#define DP_PRE_EMPHASIS_MASK (7 << 22)
1416#define DP_PRE_EMPHASIS_SHIFT 22
1417
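During link training the sink reports, via DPCD, which swing and pre-emphasis levels it wants the source to drive. A hypothetical helper mapping the two requested levels (0-3 each) onto these register fields; the real table-driven code lands in cdv_intel_dp.c and may differ:

	static u32 dp_signal_levels(u8 swing, u8 preemph)
	{
		static const u32 swing_bits[] = {
			DP_VOLTAGE_0_4, DP_VOLTAGE_0_6,
			DP_VOLTAGE_0_8, DP_VOLTAGE_1_2,
		};
		static const u32 preemph_bits[] = {
			DP_PRE_EMPHASIS_0, DP_PRE_EMPHASIS_3_5,
			DP_PRE_EMPHASIS_6, DP_PRE_EMPHASIS_9_5,
		};
		return swing_bits[swing & 3] | preemph_bits[preemph & 3];
	}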
1418/* How many wires to use. I guess 3 was too hard */
1419#define DP_PORT_WIDTH_1 (0 << 19)
1420#define DP_PORT_WIDTH_2 (1 << 19)
1421#define DP_PORT_WIDTH_4 (3 << 19)
1422#define DP_PORT_WIDTH_MASK (7 << 19)
1423
1424/* Mystic DPCD version 1.1 special mode */
1425#define DP_ENHANCED_FRAMING (1 << 18)
1426
1427/** locked once port is enabled */
1428#define DP_PORT_REVERSAL (1 << 15)
1429
1430/** sends the clock on lane 15 of the PEG for debug */
1431#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
1432
1433#define DP_SCRAMBLING_DISABLE (1 << 12)
1434#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
1435
1436/** limit RGB values to avoid confusing TVs */
1437#define DP_COLOR_RANGE_16_235 (1 << 8)
1438
1439/** Turn on the audio link */
1440#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
1441
1442/** vs and hs sync polarity */
1443#define DP_SYNC_VS_HIGH (1 << 4)
1444#define DP_SYNC_HS_HIGH (1 << 3)
1445
1446/** A fantasy */
1447#define DP_DETECTED (1 << 2)
1448
1449/** The aux channel provides a way to talk to the
1450 * signal sink for DDC etc. Max packet size supported
1451 * is 20 bytes in each direction, hence the 5 fixed
1452 * data registers
1453 */
1454#define DPB_AUX_CH_CTL 0x64110
1455#define DPB_AUX_CH_DATA1 0x64114
1456#define DPB_AUX_CH_DATA2 0x64118
1457#define DPB_AUX_CH_DATA3 0x6411c
1458#define DPB_AUX_CH_DATA4 0x64120
1459#define DPB_AUX_CH_DATA5 0x64124
1460
1461#define DPC_AUX_CH_CTL 0x64210
1462#define DPC_AUX_CH_DATA1 0x64214
1463#define DPC_AUX_CH_DATA2 0x64218
1464#define DPC_AUX_CH_DATA3 0x6421c
1465#define DPC_AUX_CH_DATA4 0x64220
1466#define DPC_AUX_CH_DATA5 0x64224
1467
1468#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
1469#define DP_AUX_CH_CTL_DONE (1 << 30)
1470#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
1471#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28)
1472#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26)
1473#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26)
1474#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26)
1475#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26)
1476#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26)
1477#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
1478#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
1479#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
1480#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16)
1481#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16
1482#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15)
1483#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14)
1484#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13)
1485#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12)
1486#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
1487#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
1488#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
1489
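Each DATA register carries four message bytes, most significant byte first, which is why the 20-byte maximum needs exactly five registers per direction. A sketch of the byte-packing helper, modelled on the pack_aux() used elsewhere in i915 DP code (cdv_intel_dp.c carries its own copy; treat the exact form as an assumption):

	static u32 pack_aux(const u8 *src, int src_bytes)
	{
		u32 v = 0;
		int i;

		if (src_bytes > 4)
			src_bytes = 4;
		for (i = 0; i < src_bytes; i++)
			v |= ((u32)src[i]) << ((3 - i) * 8); /* byte 0 -> bits 31:24 */
		return v;
	}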
1490/*
1491 * Computing GMCH M and N values for the Display Port link
1492 *
1493 * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes
1494 *
1495 * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
1496 *
1497 * The GMCH value is used internally
1498 *
1499 * bytes_per_pixel is the number of bytes coming out of the plane,
1500 * which is after the LUTs, so we want the bytes for our color format.
1501 * For our current usage, this is always 3, one byte for R, G and B.
1502 */
1503
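A worked example of the formula, under the assumption that ls_clk is expressed in the same kHz units as the dot clock (270000 for the 2.7 GHz link): a 148.5 MHz dot clock at 3 bytes per pixel over two lanes gives M = 148500 * 3 = 445500 and N = 270000 * 2 = 540000. A sketch, not the driver's actual helper:

	static void compute_gmch_m_n(u32 dot_khz, u32 bytes_pp,
				     u32 ls_clk_khz, u32 nlanes,
				     u32 *m, u32 *n)
	{
		*m = dot_khz * bytes_pp;
		*n = ls_clk_khz * nlanes;
		/* both fields are 24 bits wide; scale the pair down together */
		while (*m > 0xffffff || *n > 0xffffff) {
			*m >>= 1;
			*n >>= 1;
		}
	}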
1504#define _PIPEA_GMCH_DATA_M 0x70050
1505#define _PIPEB_GMCH_DATA_M 0x71050
1506
1507/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
1508#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
1509#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25
1510
1511#define PIPE_GMCH_DATA_M_MASK (0xffffff)
1512
1513#define _PIPEA_GMCH_DATA_N 0x70054
1514#define _PIPEB_GMCH_DATA_N 0x71054
1515#define PIPE_GMCH_DATA_N_MASK (0xffffff)
1516
1517/*
1518 * Computing Link M and N values for the Display Port link
1519 *
1520 * Link M / N = pixel_clock / ls_clk
1521 *
1522 * (the DP spec calls pixel_clock the 'strm_clk')
1523 *
1524 * The Link value is transmitted in the Main Stream
1525 * Attributes and VB-ID.
1526 */
1527
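Continuing the hypothetical example above: Link M = 148500 (the pixel clock in kHz) and Link N = 270000 (ls_clk in kHz), so the sink recovers the pixel clock as ls_clk * M / N.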
1528#define _PIPEA_DP_LINK_M 0x70060
1529#define _PIPEB_DP_LINK_M 0x71060
1530#define PIPEA_DP_LINK_M_MASK (0xffffff)
1531
1532#define _PIPEA_DP_LINK_N 0x70064
1533#define _PIPEB_DP_LINK_N 0x71064
1534#define PIPEA_DP_LINK_N_MASK (0xffffff)
1535
1536#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
1537#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
1538#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
1539#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
1540
1541#define PIPE_BPC_MASK (7 << 5)
1542#define PIPE_8BPC (0 << 5)
1543#define PIPE_10BPC (1 << 5)
1544#define PIPE_6BPC (2 << 5)
1350 1545
1351#endif 1546#endif
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index c148d92229fd..fc9292705dbf 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1291,7 +1291,6 @@ psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
1291 1291
1292 return drm_get_edid(connector, 1292 return drm_get_edid(connector,
1293 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); 1293 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
1294 return NULL;
1295} 1294}
1296 1295
1297static enum drm_connector_status 1296static enum drm_connector_status
@@ -1342,7 +1341,6 @@ psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1342 } 1341 }
1343 } else 1342 } else
1344 status = connector_status_disconnected; 1343 status = connector_status_disconnected;
1345 connector->display_info.raw_edid = NULL;
1346 kfree(edid); 1344 kfree(edid);
1347 } 1345 }
1348 1346
@@ -1403,7 +1401,6 @@ psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
1403 ret = connector_status_disconnected; 1401 ret = connector_status_disconnected;
1404 else 1402 else
1405 ret = connector_status_connected; 1403 ret = connector_status_connected;
1406 connector->display_info.raw_edid = NULL;
1407 kfree(edid); 1404 kfree(edid);
1408 } else 1405 } else
1409 ret = connector_status_connected; 1406 ret = connector_status_connected;
@@ -1452,7 +1449,6 @@ static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1452 drm_add_edid_modes(connector, edid); 1449 drm_add_edid_modes(connector, edid);
1453 } 1450 }
1454 1451
1455 connector->display_info.raw_edid = NULL;
1456 kfree(edid); 1452 kfree(edid);
1457 } 1453 }
1458} 1454}
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 36d952280c50..599099fe76e3 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -427,15 +427,10 @@ static int ch7006_remove(struct i2c_client *client)
427 return 0; 427 return 0;
428} 428}
429 429
430static int ch7006_suspend(struct i2c_client *client, pm_message_t mesg) 430static int ch7006_resume(struct device *dev)
431{ 431{
432 ch7006_dbg(client, "\n"); 432 struct i2c_client *client = to_i2c_client(dev);
433
434 return 0;
435}
436 433
437static int ch7006_resume(struct i2c_client *client)
438{
439 ch7006_dbg(client, "\n"); 434 ch7006_dbg(client, "\n");
440 435
441 ch7006_write(client, 0x3d, 0x0); 436 ch7006_write(client, 0x3d, 0x0);
@@ -499,15 +494,18 @@ static struct i2c_device_id ch7006_ids[] = {
499}; 494};
500MODULE_DEVICE_TABLE(i2c, ch7006_ids); 495MODULE_DEVICE_TABLE(i2c, ch7006_ids);
501 496
497static const struct dev_pm_ops ch7006_pm_ops = {
498 .resume = ch7006_resume,
499};
500
502static struct drm_i2c_encoder_driver ch7006_driver = { 501static struct drm_i2c_encoder_driver ch7006_driver = {
503 .i2c_driver = { 502 .i2c_driver = {
504 .probe = ch7006_probe, 503 .probe = ch7006_probe,
505 .remove = ch7006_remove, 504 .remove = ch7006_remove,
506 .suspend = ch7006_suspend,
507 .resume = ch7006_resume,
508 505
509 .driver = { 506 .driver = {
510 .name = "ch7006", 507 .name = "ch7006",
508 .pm = &ch7006_pm_ops,
511 }, 509 },
512 510
513 .id_table = ch7006_ids, 511 .id_table = ch7006_ids,
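Only .resume is populated because the deleted suspend hook did nothing. As an aside (not what this patch does), a driver that keeps both callbacks could have the table generated for it:

	/* hypothetical alternative using the kernel's helper macro */
	static SIMPLE_DEV_PM_OPS(ch7006_pm_ops, ch7006_suspend, ch7006_resume);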
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b0bacdba6d7e..0f2c5493242b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,6 +40,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
40 dvo_ivch.o \ 40 dvo_ivch.o \
41 dvo_tfp410.o \ 41 dvo_tfp410.o \
42 dvo_sil164.o \ 42 dvo_sil164.o \
43 dvo_ns2501.o \
43 i915_gem_dmabuf.o 44 i915_gem_dmabuf.o
44 45
45i915-$(CONFIG_COMPAT) += i915_ioc32.o 46i915-$(CONFIG_COMPAT) += i915_ioc32.o
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 573de82c9f5a..33a62ad80100 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -57,13 +57,12 @@ struct intel_dvo_dev_ops {
57 void (*create_resources)(struct intel_dvo_device *dvo); 57 void (*create_resources)(struct intel_dvo_device *dvo);
58 58
59 /* 59 /*
60 * Turn on/off output or set intermediate power levels if available. 60 * Turn on/off output.
61 * 61 *
62 * Unsupported intermediate modes drop to the lower power setting. 62 * Because none of our dvo drivers support an intermediate power levels,
63 * If the mode is DPMSModeOff, the output must be disabled, 63 * we don't expose this in the interfac.
64 * as the DPLL may be disabled afterwards.
65 */ 64 */
66 void (*dpms)(struct intel_dvo_device *dvo, int mode); 65 void (*dpms)(struct intel_dvo_device *dvo, bool enable);
67 66
68 /* 67 /*
69 * Callback for testing a video mode for a given output. 68 * Callback for testing a video mode for a given output.
@@ -114,6 +113,12 @@ struct intel_dvo_dev_ops {
114 */ 113 */
115 enum drm_connector_status (*detect)(struct intel_dvo_device *dvo); 114 enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
116 115
116 /*
117 * Probe the current hw status, returning true if the connected output
118 * is active.
119 */
120 bool (*get_hw_state)(struct intel_dvo_device *dev);
121
117 /** 122 /**
118 * Query the device for the modes it provides. 123 * Query the device for the modes it provides.
119 * 124 *
@@ -139,5 +144,6 @@ extern struct intel_dvo_dev_ops ch7xxx_ops;
139extern struct intel_dvo_dev_ops ivch_ops; 144extern struct intel_dvo_dev_ops ivch_ops;
140extern struct intel_dvo_dev_ops tfp410_ops; 145extern struct intel_dvo_dev_ops tfp410_ops;
141extern struct intel_dvo_dev_ops ch7017_ops; 146extern struct intel_dvo_dev_ops ch7017_ops;
147extern struct intel_dvo_dev_ops ns2501_ops;
142 148
143#endif /* _INTEL_DVO_H */ 149#endif /* _INTEL_DVO_H */
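The new get_hw_state hook lets intel_dvo read back whether the slave encoder is actually driving the panel, so software state can be cross-checked against hardware. A hypothetical caller sketch, assuming the intel_dvo_device keeps its ops in a dev_ops pointer as in this header (the real wiring is in intel_dvo.c, not shown here):

	static bool dvo_encoder_is_active(struct intel_dvo_device *dvo)
	{
		if (!dvo->dev_ops->get_hw_state)
			return false;	/* assume off when the driver can't tell */
		return dvo->dev_ops->get_hw_state(dvo);
	}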
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 1ca799a1e1fc..86b27d1d90c2 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -163,7 +163,7 @@ struct ch7017_priv {
163}; 163};
164 164
165static void ch7017_dump_regs(struct intel_dvo_device *dvo); 165static void ch7017_dump_regs(struct intel_dvo_device *dvo);
166static void ch7017_dpms(struct intel_dvo_device *dvo, int mode); 166static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
167 167
168static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val) 168static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
169{ 169{
@@ -309,7 +309,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
309 lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED | 309 lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
310 (mode->hdisplay & 0x0700) >> 8; 310 (mode->hdisplay & 0x0700) >> 8;
311 311
312 ch7017_dpms(dvo, DRM_MODE_DPMS_OFF); 312 ch7017_dpms(dvo, false);
313 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT, 313 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
314 horizontal_active_pixel_input); 314 horizontal_active_pixel_input);
315 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT, 315 ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
@@ -331,7 +331,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
331} 331}
332 332
333/* set the CH7017 power state */ 333/* set the CH7017 power state */
334static void ch7017_dpms(struct intel_dvo_device *dvo, int mode) 334static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
335{ 335{
336 uint8_t val; 336 uint8_t val;
337 337
@@ -345,7 +345,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
345 CH7017_DAC3_POWER_DOWN | 345 CH7017_DAC3_POWER_DOWN |
346 CH7017_TV_POWER_DOWN_EN); 346 CH7017_TV_POWER_DOWN_EN);
347 347
348 if (mode == DRM_MODE_DPMS_ON) { 348 if (enable) {
349 /* Turn on the LVDS */ 349 /* Turn on the LVDS */
350 ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, 350 ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
351 val & ~CH7017_LVDS_POWER_DOWN_EN); 351 val & ~CH7017_LVDS_POWER_DOWN_EN);
@@ -359,6 +359,18 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
359 msleep(20); 359 msleep(20);
360} 360}
361 361
362static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
363{
364 uint8_t val;
365
366 ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
367
368 if (val & CH7017_LVDS_POWER_DOWN_EN)
369 return false;
370 else
371 return true;
372}
373
362static void ch7017_dump_regs(struct intel_dvo_device *dvo) 374static void ch7017_dump_regs(struct intel_dvo_device *dvo)
363{ 375{
364 uint8_t val; 376 uint8_t val;
@@ -396,6 +408,7 @@ struct intel_dvo_dev_ops ch7017_ops = {
396 .mode_valid = ch7017_mode_valid, 408 .mode_valid = ch7017_mode_valid,
397 .mode_set = ch7017_mode_set, 409 .mode_set = ch7017_mode_set,
398 .dpms = ch7017_dpms, 410 .dpms = ch7017_dpms,
411 .get_hw_state = ch7017_get_hw_state,
399 .dump_regs = ch7017_dump_regs, 412 .dump_regs = ch7017_dump_regs,
400 .destroy = ch7017_destroy, 413 .destroy = ch7017_destroy,
401}; 414};
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 4a036600e806..38f3a6cb8c7d 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -289,14 +289,26 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
289} 289}
290 290
291/* set the CH7xxx power state */ 291/* set the CH7xxx power state */
292static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode) 292static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
293{ 293{
294 if (mode == DRM_MODE_DPMS_ON) 294 if (enable)
295 ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP); 295 ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
296 else 296 else
297 ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD); 297 ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
298} 298}
299 299
300static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
301{
302 u8 val;
303
304 ch7xxx_readb(dvo, CH7xxx_PM, &val);
305
306 if (val & CH7xxx_PM_FPD)
307 return false;
308 else
309 return true;
310}
311
300static void ch7xxx_dump_regs(struct intel_dvo_device *dvo) 312static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
301{ 313{
302 int i; 314 int i;
@@ -326,6 +338,7 @@ struct intel_dvo_dev_ops ch7xxx_ops = {
326 .mode_valid = ch7xxx_mode_valid, 338 .mode_valid = ch7xxx_mode_valid,
327 .mode_set = ch7xxx_mode_set, 339 .mode_set = ch7xxx_mode_set,
328 .dpms = ch7xxx_dpms, 340 .dpms = ch7xxx_dpms,
341 .get_hw_state = ch7xxx_get_hw_state,
329 .dump_regs = ch7xxx_dump_regs, 342 .dump_regs = ch7xxx_dump_regs,
330 .destroy = ch7xxx_destroy, 343 .destroy = ch7xxx_destroy,
331}; 344};
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index 04f2893d5e3c..baaf65bf0bdd 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -288,7 +288,7 @@ static enum drm_mode_status ivch_mode_valid(struct intel_dvo_device *dvo,
288} 288}
289 289
290/** Sets the power state of the panel connected to the ivch */ 290/** Sets the power state of the panel connected to the ivch */
291static void ivch_dpms(struct intel_dvo_device *dvo, int mode) 291static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
292{ 292{
293 int i; 293 int i;
294 uint16_t vr01, vr30, backlight; 294 uint16_t vr01, vr30, backlight;
@@ -297,13 +297,13 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
297 if (!ivch_read(dvo, VR01, &vr01)) 297 if (!ivch_read(dvo, VR01, &vr01))
298 return; 298 return;
299 299
300 if (mode == DRM_MODE_DPMS_ON) 300 if (enable)
301 backlight = 1; 301 backlight = 1;
302 else 302 else
303 backlight = 0; 303 backlight = 0;
304 ivch_write(dvo, VR80, backlight); 304 ivch_write(dvo, VR80, backlight);
305 305
306 if (mode == DRM_MODE_DPMS_ON) 306 if (enable)
307 vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE; 307 vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
308 else 308 else
309 vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE); 309 vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
@@ -315,7 +315,7 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
315 if (!ivch_read(dvo, VR30, &vr30)) 315 if (!ivch_read(dvo, VR30, &vr30))
316 break; 316 break;
317 317
318 if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON)) 318 if (((vr30 & VR30_PANEL_ON) != 0) == enable)
319 break; 319 break;
320 udelay(1000); 320 udelay(1000);
321 } 321 }
@@ -323,6 +323,20 @@ static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
323 udelay(16 * 1000); 323 udelay(16 * 1000);
324} 324}
325 325
326static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
327{
328 uint16_t vr01;
329
 330	/* Get the current power state of the panel. */
331 if (!ivch_read(dvo, VR01, &vr01))
332 return false;
333
334 if (vr01 & VR01_LCD_ENABLE)
335 return true;
336 else
337 return false;
338}
339
326static void ivch_mode_set(struct intel_dvo_device *dvo, 340static void ivch_mode_set(struct intel_dvo_device *dvo,
327 struct drm_display_mode *mode, 341 struct drm_display_mode *mode,
328 struct drm_display_mode *adjusted_mode) 342 struct drm_display_mode *adjusted_mode)
@@ -413,6 +427,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
413struct intel_dvo_dev_ops ivch_ops = { 427struct intel_dvo_dev_ops ivch_ops = {
414 .init = ivch_init, 428 .init = ivch_init,
415 .dpms = ivch_dpms, 429 .dpms = ivch_dpms,
430 .get_hw_state = ivch_get_hw_state,
416 .mode_valid = ivch_mode_valid, 431 .mode_valid = ivch_mode_valid,
417 .mode_set = ivch_mode_set, 432 .mode_set = ivch_mode_set,
418 .detect = ivch_detect, 433 .detect = ivch_detect,
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
new file mode 100644
index 000000000000..c4a255be6979
--- /dev/null
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -0,0 +1,588 @@
1/*
2 *
3 * Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
4 *
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#include "dvo.h"
30#include "i915_reg.h"
31#include "i915_drv.h"
32
33#define NS2501_VID 0x1305
34#define NS2501_DID 0x6726
35
36#define NS2501_VID_LO 0x00
37#define NS2501_VID_HI 0x01
38#define NS2501_DID_LO 0x02
39#define NS2501_DID_HI 0x03
40#define NS2501_REV 0x04
41#define NS2501_RSVD 0x05
42#define NS2501_FREQ_LO 0x06
43#define NS2501_FREQ_HI 0x07
44
45#define NS2501_REG8 0x08
46#define NS2501_8_VEN (1<<5)
47#define NS2501_8_HEN (1<<4)
48#define NS2501_8_DSEL (1<<3)
49#define NS2501_8_BPAS (1<<2)
50#define NS2501_8_RSVD (1<<1)
51#define NS2501_8_PD (1<<0)
52
53#define NS2501_REG9 0x09
54#define NS2501_9_VLOW (1<<7)
55#define NS2501_9_MSEL_MASK (0x7<<4)
56#define NS2501_9_TSEL (1<<3)
57#define NS2501_9_RSEN (1<<2)
58#define NS2501_9_RSVD (1<<1)
59#define NS2501_9_MDI (1<<0)
60
61#define NS2501_REGC 0x0c
62
63struct ns2501_priv {
 64	/* I2CDevRec d; */
 65	bool quiet;
 66	int reg_8_shadow;
 67	int reg_8_set;
 68	/* Shadow registers for i915 */
69 int dvoc;
70 int pll_a;
71 int srcdim;
72 int fw_blc;
73};
74
75#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
76
77/*
 78 * For reasons unclear to me, the ns2501, at least on the Fujitsu/Siemens
 79 * laptops, does not respond on the i2c bus unless
80 * both the PLL is running and the display is configured in its native
81 * resolution.
82 * This function forces the DVO on, and stores the registers it touches.
83 * Afterwards, registers are restored to regular values.
84 *
85 * This is pretty much a hack, though it works.
86 * Without that, ns2501_readb and ns2501_writeb fail
87 * when switching the resolution.
88 */
89
90static void enable_dvo(struct intel_dvo_device *dvo)
91{
92 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
93 struct i2c_adapter *adapter = dvo->i2c_bus;
94 struct intel_gmbus *bus = container_of(adapter,
95 struct intel_gmbus,
96 adapter);
97 struct drm_i915_private *dev_priv = bus->dev_priv;
98
 99	DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __func__);
100
101 ns->dvoc = I915_READ(DVO_C);
102 ns->pll_a = I915_READ(_DPLL_A);
103 ns->srcdim = I915_READ(DVOC_SRCDIM);
104 ns->fw_blc = I915_READ(FW_BLC);
105
106 I915_WRITE(DVOC, 0x10004084);
107 I915_WRITE(_DPLL_A, 0xd0820000);
108 I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768
109 I915_WRITE(FW_BLC, 0x1080304);
110
111 I915_WRITE(DVOC, 0x90004084);
112}
113
114/*
115 * Restore the I915 registers modified by the above
116 * trigger function.
117 */
118static void restore_dvo(struct intel_dvo_device *dvo)
119{
120 struct i2c_adapter *adapter = dvo->i2c_bus;
121 struct intel_gmbus *bus = container_of(adapter,
122 struct intel_gmbus,
123 adapter);
124 struct drm_i915_private *dev_priv = bus->dev_priv;
125 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
126
127 I915_WRITE(DVOC, ns->dvoc);
128 I915_WRITE(_DPLL_A, ns->pll_a);
129 I915_WRITE(DVOC_SRCDIM, ns->srcdim);
130 I915_WRITE(FW_BLC, ns->fw_blc);
131}
132
133/*
134** Read a register from the ns2501.
135** Returns true if successful, false otherwise.
136** If it returns false, it might be wise to enable the
137** DVO with the above function.
138*/
139static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
140{
141 struct ns2501_priv *ns = dvo->dev_priv;
142 struct i2c_adapter *adapter = dvo->i2c_bus;
143 u8 out_buf[2];
144 u8 in_buf[2];
145
146 struct i2c_msg msgs[] = {
147 {
148 .addr = dvo->slave_addr,
149 .flags = 0,
150 .len = 1,
151 .buf = out_buf,
152 },
153 {
154 .addr = dvo->slave_addr,
155 .flags = I2C_M_RD,
156 .len = 1,
157 .buf = in_buf,
158 }
159 };
160
161 out_buf[0] = addr;
162 out_buf[1] = 0;
163
164 if (i2c_transfer(adapter, msgs, 2) == 2) {
165 *ch = in_buf[0];
166 return true;
 167	}
168
169 if (!ns->quiet) {
170 DRM_DEBUG_KMS
171 ("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
172 adapter->name, dvo->slave_addr);
173 }
174
175 return false;
176}
177
178/*
179** Write a register to the ns2501.
180** Returns true if successful, false otherwise.
181** If it returns false, it might be wise to enable the
182** DVO with the above function.
183*/
184static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
185{
186 struct ns2501_priv *ns = dvo->dev_priv;
187 struct i2c_adapter *adapter = dvo->i2c_bus;
188 uint8_t out_buf[2];
189
190 struct i2c_msg msg = {
191 .addr = dvo->slave_addr,
192 .flags = 0,
193 .len = 2,
194 .buf = out_buf,
195 };
196
197 out_buf[0] = addr;
198 out_buf[1] = ch;
199
200 if (i2c_transfer(adapter, &msg, 1) == 1) {
201 return true;
202 }
203
204 if (!ns->quiet) {
205 DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
206 addr, adapter->name, dvo->slave_addr);
207 }
208
209 return false;
210}
211
 212/* National Semiconductor 2501 driver: scan for the chip on the given
 213 * i2c bus.
214 * Hope the VBIOS initialized the PLL correctly so we can
215 * talk to it. If not, it will not be seen and not detected.
216 * Bummer!
217 */
218static bool ns2501_init(struct intel_dvo_device *dvo,
219 struct i2c_adapter *adapter)
220{
221 /* this will detect the NS2501 chip on the specified i2c bus */
222 struct ns2501_priv *ns;
223 unsigned char ch;
224
225 ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
226 if (ns == NULL)
227 return false;
228
229 dvo->i2c_bus = adapter;
230 dvo->dev_priv = ns;
231 ns->quiet = true;
232
233 if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
234 goto out;
235
236 if (ch != (NS2501_VID & 0xff)) {
 237		DRM_DEBUG_KMS("ns2501 not detected, got %d from %s slave %d.\n",
238 ch, adapter->name, dvo->slave_addr);
239 goto out;
240 }
241
242 if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
243 goto out;
244
245 if (ch != (NS2501_DID & 0xff)) {
 246		DRM_DEBUG_KMS("ns2501 not detected, got %d from %s slave %d.\n",
247 ch, adapter->name, dvo->slave_addr);
248 goto out;
249 }
250 ns->quiet = false;
251 ns->reg_8_set = 0;
252 ns->reg_8_shadow =
253 NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
254
 255	DRM_DEBUG_KMS("initialized the ns2501 dvo controller successfully!\n");
256 return true;
257
258out:
259 kfree(ns);
260 return false;
261}
262
263static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
264{
265 /*
 266	 * This is a laptop display; it doesn't have hotplugging.
 267	 * Even if it did, the detection bit of the 2501 is unreliable, as
 268	 * it only works for some display types.
 269	 * It is even more unreliable as the PLL must be active to
 270	 * allow reading from the chip.
271 */
272 return connector_status_connected;
273}
274
275static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
276 struct drm_display_mode *mode)
277{
278 DRM_DEBUG_KMS
279 ("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
 280	    __func__, mode->hdisplay, mode->htotal, mode->vdisplay,
281 mode->vtotal);
282
283 /*
 284	 * Currently, these are all the modes I have data for.
 285	 * More might exist. It is unclear how to find the panel's native
 286	 * resolution in here; if we could, we could always accept it
 287	 * by disabling the scaler.
288 */
289 if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
290 (mode->hdisplay == 640 && mode->vdisplay == 480) ||
291 (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
292 return MODE_OK;
293 } else {
294 return MODE_ONE_SIZE; /* Is this a reasonable error? */
295 }
296}
297
298static void ns2501_mode_set(struct intel_dvo_device *dvo,
299 struct drm_display_mode *mode,
300 struct drm_display_mode *adjusted_mode)
301{
302 bool ok;
303 bool restore = false;
304 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
305
306 DRM_DEBUG_KMS
307 ("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
 308	    __func__, mode->hdisplay, mode->htotal, mode->vdisplay,
309 mode->vtotal);
310
311 /*
312 * Where do I find the native resolution for which scaling is not required???
313 *
314 * First trigger the DVO on as otherwise the chip does not appear on the i2c
315 * bus.
316 */
317 do {
318 ok = true;
319
320 if (mode->hdisplay == 800 && mode->vdisplay == 600) {
321 /* mode 277 */
322 ns->reg_8_shadow &= ~NS2501_8_BPAS;
323 DRM_DEBUG_KMS("%s: switching to 800x600\n",
 324			      __func__);
325
326 /*
327 * No, I do not know where this data comes from.
328 * It is just what the video bios left in the DVO, so
 329			 * I'm just copying it over here.
330 * This also means that I cannot support any other modes
331 * except the ones supported by the bios.
332 */
333 ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
334 ok &= ns2501_writeb(dvo, 0x1b, 0x19);
335 ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
336 ok &= ns2501_writeb(dvo, 0x1d, 0x02);
337
338 ok &= ns2501_writeb(dvo, 0x34, 0x03);
339 ok &= ns2501_writeb(dvo, 0x35, 0xff);
340
341 ok &= ns2501_writeb(dvo, 0x80, 0x27);
342 ok &= ns2501_writeb(dvo, 0x81, 0x03);
343 ok &= ns2501_writeb(dvo, 0x82, 0x41);
344 ok &= ns2501_writeb(dvo, 0x83, 0x05);
345
346 ok &= ns2501_writeb(dvo, 0x8d, 0x02);
347 ok &= ns2501_writeb(dvo, 0x8e, 0x04);
348 ok &= ns2501_writeb(dvo, 0x8f, 0x00);
349
350 ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */
351 ok &= ns2501_writeb(dvo, 0x91, 0x07);
352 ok &= ns2501_writeb(dvo, 0x94, 0x00);
353 ok &= ns2501_writeb(dvo, 0x95, 0x00);
354
355 ok &= ns2501_writeb(dvo, 0x96, 0x00);
356
357 ok &= ns2501_writeb(dvo, 0x99, 0x00);
358 ok &= ns2501_writeb(dvo, 0x9a, 0x88);
359
360 ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
361 ok &= ns2501_writeb(dvo, 0x9d, 0x00);
362 ok &= ns2501_writeb(dvo, 0x9e, 0x25);
363 ok &= ns2501_writeb(dvo, 0x9f, 0x03);
364
365 ok &= ns2501_writeb(dvo, 0xa4, 0x80);
366
367 ok &= ns2501_writeb(dvo, 0xb6, 0x00);
368
369 ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
370 ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
371
372 ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
373 ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
374
375 ok &= ns2501_writeb(dvo, 0xc2, 0x00);
376 ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
377
378 ok &= ns2501_writeb(dvo, 0xc4, 0x03);
379 ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
380
381 ok &= ns2501_writeb(dvo, 0xc6, 0x00);
382 ok &= ns2501_writeb(dvo, 0xc7, 0x73);
383 ok &= ns2501_writeb(dvo, 0xc8, 0x02);
384
385 } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
386 /* mode 274 */
387 DRM_DEBUG_KMS("%s: switching to 640x480\n",
 388				      __func__);
389 /*
390 * No, I do not know where this data comes from.
391 * It is just what the video bios left in the DVO, so
 392			 * I'm just copying it over here.
393 * This also means that I cannot support any other modes
394 * except the ones supported by the bios.
395 */
396 ns->reg_8_shadow &= ~NS2501_8_BPAS;
397
398 ok &= ns2501_writeb(dvo, 0x11, 0xa0);
399 ok &= ns2501_writeb(dvo, 0x1b, 0x11);
400 ok &= ns2501_writeb(dvo, 0x1c, 0x54);
401 ok &= ns2501_writeb(dvo, 0x1d, 0x03);
402
403 ok &= ns2501_writeb(dvo, 0x34, 0x03);
404 ok &= ns2501_writeb(dvo, 0x35, 0xff);
405
406 ok &= ns2501_writeb(dvo, 0x80, 0xff);
407 ok &= ns2501_writeb(dvo, 0x81, 0x07);
408 ok &= ns2501_writeb(dvo, 0x82, 0x3d);
409 ok &= ns2501_writeb(dvo, 0x83, 0x05);
410
411 ok &= ns2501_writeb(dvo, 0x8d, 0x02);
412 ok &= ns2501_writeb(dvo, 0x8e, 0x10);
413 ok &= ns2501_writeb(dvo, 0x8f, 0x00);
414
415 ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
416 ok &= ns2501_writeb(dvo, 0x91, 0x07);
417 ok &= ns2501_writeb(dvo, 0x94, 0x00);
418 ok &= ns2501_writeb(dvo, 0x95, 0x00);
419
420 ok &= ns2501_writeb(dvo, 0x96, 0x05);
421
422 ok &= ns2501_writeb(dvo, 0x99, 0x00);
423 ok &= ns2501_writeb(dvo, 0x9a, 0x88);
424
425 ok &= ns2501_writeb(dvo, 0x9c, 0x24);
426 ok &= ns2501_writeb(dvo, 0x9d, 0x00);
427 ok &= ns2501_writeb(dvo, 0x9e, 0x25);
428 ok &= ns2501_writeb(dvo, 0x9f, 0x03);
429
430 ok &= ns2501_writeb(dvo, 0xa4, 0x84);
431
432 ok &= ns2501_writeb(dvo, 0xb6, 0x09);
433
434 ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
435 ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
436
437 ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
438 ok &= ns2501_writeb(dvo, 0xc1, 0x90);
439
440 ok &= ns2501_writeb(dvo, 0xc2, 0x00);
441 ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
442
443 ok &= ns2501_writeb(dvo, 0xc4, 0x03);
444 ok &= ns2501_writeb(dvo, 0xc5, 0x16);
445
446 ok &= ns2501_writeb(dvo, 0xc6, 0x00);
447 ok &= ns2501_writeb(dvo, 0xc7, 0x02);
448 ok &= ns2501_writeb(dvo, 0xc8, 0x02);
449
450 } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
451 /* mode 280 */
452 DRM_DEBUG_KMS("%s: switching to 1024x768\n",
 453				      __func__);
454 /*
455 * This might or might not work, actually. I'm silently
456 * assuming here that the native panel resolution is
457 * 1024x768. If not, then this leaves the scaler disabled
 458			 * generating a picture that is likely not the expected one.
459 *
460 * Problem is that I do not know where to take the panel
461 * dimensions from.
462 *
463 * Enable the bypass, scaling not required.
464 *
465 * The scaler registers are irrelevant here....
466 *
467 */
468 ns->reg_8_shadow |= NS2501_8_BPAS;
469 ok &= ns2501_writeb(dvo, 0x37, 0x44);
470 } else {
471 /*
472 * Data not known. Bummer!
 473			 * Hopefully the code never gets here,
 474			 * as mode_valid accepted no other modes.
475 */
476 ns->reg_8_shadow |= NS2501_8_BPAS;
477 }
478 ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
479
480 if (!ok) {
481 if (restore)
482 restore_dvo(dvo);
483 enable_dvo(dvo);
484 restore = true;
485 }
486 } while (!ok);
487 /*
488 * Restore the old i915 registers before
489 * forcing the ns2501 on.
490 */
491 if (restore)
492 restore_dvo(dvo);
493}
494
 495/* read back the current NS2501 power state */
496static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
497{
498 unsigned char ch;
499
500 if (!ns2501_readb(dvo, NS2501_REG8, &ch))
501 return false;
502
503 if (ch & NS2501_8_PD)
504 return true;
505 else
506 return false;
507}
508
509/* set the NS2501 power state */
510static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
511{
512 bool ok;
513 bool restore = false;
514 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
515 unsigned char ch;
516
 517	DRM_DEBUG_KMS("%s: Trying to set the dpms of the DVO to %i\n",
 518		      __func__, enable);
519
520 ch = ns->reg_8_shadow;
521
522 if (enable)
523 ch |= NS2501_8_PD;
524 else
525 ch &= ~NS2501_8_PD;
526
527 if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
528 ns->reg_8_set = 1;
529 ns->reg_8_shadow = ch;
530
531 do {
532 ok = true;
533 ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
534 ok &=
535 ns2501_writeb(dvo, 0x34,
536 enable ? 0x03 : 0x00);
537 ok &=
538 ns2501_writeb(dvo, 0x35,
539 enable ? 0xff : 0x00);
540 if (!ok) {
541 if (restore)
542 restore_dvo(dvo);
543 enable_dvo(dvo);
544 restore = true;
545 }
546 } while (!ok);
547
548 if (restore)
549 restore_dvo(dvo);
550 }
551}
552
553static void ns2501_dump_regs(struct intel_dvo_device *dvo)
554{
555 uint8_t val;
556
557 ns2501_readb(dvo, NS2501_FREQ_LO, &val);
558 DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
559 ns2501_readb(dvo, NS2501_FREQ_HI, &val);
560 DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
561 ns2501_readb(dvo, NS2501_REG8, &val);
562 DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
563 ns2501_readb(dvo, NS2501_REG9, &val);
564 DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
565 ns2501_readb(dvo, NS2501_REGC, &val);
566 DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
567}
568
569static void ns2501_destroy(struct intel_dvo_device *dvo)
570{
571 struct ns2501_priv *ns = dvo->dev_priv;
572
573 if (ns) {
574 kfree(ns);
575 dvo->dev_priv = NULL;
576 }
577}
578
579struct intel_dvo_dev_ops ns2501_ops = {
580 .init = ns2501_init,
581 .detect = ns2501_detect,
582 .mode_valid = ns2501_mode_valid,
583 .mode_set = ns2501_mode_set,
584 .dpms = ns2501_dpms,
585 .get_hw_state = ns2501_get_hw_state,
586 .dump_regs = ns2501_dump_regs,
587 .destroy = ns2501_destroy,
588};
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index a0b13a6f619d..4debd32e3e4c 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -208,7 +208,7 @@ static void sil164_mode_set(struct intel_dvo_device *dvo,
208} 208}
209 209
210/* set the SIL164 power state */ 210/* set the SIL164 power state */
211static void sil164_dpms(struct intel_dvo_device *dvo, int mode) 211static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
212{ 212{
213 int ret; 213 int ret;
214 unsigned char ch; 214 unsigned char ch;
@@ -217,7 +217,7 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
217 if (ret == false) 217 if (ret == false)
218 return; 218 return;
219 219
220 if (mode == DRM_MODE_DPMS_ON) 220 if (enable)
221 ch |= SIL164_8_PD; 221 ch |= SIL164_8_PD;
222 else 222 else
223 ch &= ~SIL164_8_PD; 223 ch &= ~SIL164_8_PD;
@@ -226,6 +226,21 @@ static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
226 return; 226 return;
227} 227}
228 228
229static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
230{
231 int ret;
232 unsigned char ch;
233
234 ret = sil164_readb(dvo, SIL164_REG8, &ch);
235 if (ret == false)
236 return false;
237
238 if (ch & SIL164_8_PD)
239 return true;
240 else
241 return false;
242}
243
229static void sil164_dump_regs(struct intel_dvo_device *dvo) 244static void sil164_dump_regs(struct intel_dvo_device *dvo)
230{ 245{
231 uint8_t val; 246 uint8_t val;
@@ -258,6 +273,7 @@ struct intel_dvo_dev_ops sil164_ops = {
258 .mode_valid = sil164_mode_valid, 273 .mode_valid = sil164_mode_valid,
259 .mode_set = sil164_mode_set, 274 .mode_set = sil164_mode_set,
260 .dpms = sil164_dpms, 275 .dpms = sil164_dpms,
276 .get_hw_state = sil164_get_hw_state,
261 .dump_regs = sil164_dump_regs, 277 .dump_regs = sil164_dump_regs,
262 .destroy = sil164_destroy, 278 .destroy = sil164_destroy,
263}; 279};
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index aa2cd3ec54aa..e17f1b07e915 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -234,14 +234,14 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
234} 234}
235 235
236/* set the tfp410 power state */ 236/* set the tfp410 power state */
237static void tfp410_dpms(struct intel_dvo_device *dvo, int mode) 237static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
238{ 238{
239 uint8_t ctl1; 239 uint8_t ctl1;
240 240
241 if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) 241 if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
242 return; 242 return;
243 243
244 if (mode == DRM_MODE_DPMS_ON) 244 if (enable)
245 ctl1 |= TFP410_CTL_1_PD; 245 ctl1 |= TFP410_CTL_1_PD;
246 else 246 else
247 ctl1 &= ~TFP410_CTL_1_PD; 247 ctl1 &= ~TFP410_CTL_1_PD;
@@ -249,6 +249,19 @@ static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
249 tfp410_writeb(dvo, TFP410_CTL_1, ctl1); 249 tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
250} 250}
251 251
252static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
253{
254 uint8_t ctl1;
255
256 if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
257 return false;
258
259 if (ctl1 & TFP410_CTL_1_PD)
260 return true;
261 else
262 return false;
263}
264
252static void tfp410_dump_regs(struct intel_dvo_device *dvo) 265static void tfp410_dump_regs(struct intel_dvo_device *dvo)
253{ 266{
254 uint8_t val, val2; 267 uint8_t val, val2;
@@ -299,6 +312,7 @@ struct intel_dvo_dev_ops tfp410_ops = {
299 .mode_valid = tfp410_mode_valid, 312 .mode_valid = tfp410_mode_valid,
300 .mode_set = tfp410_mode_set, 313 .mode_set = tfp410_mode_set,
301 .dpms = tfp410_dpms, 314 .dpms = tfp410_dpms,
315 .get_hw_state = tfp410_get_hw_state,
302 .dump_regs = tfp410_dump_regs, 316 .dump_regs = tfp410_dump_regs,
303 .destroy = tfp410_destroy, 317 .destroy = tfp410_destroy,
304}; 318};
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 63f01e29c1fa..dde8b505bf7f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -43,7 +43,6 @@
43 43
44enum { 44enum {
45 ACTIVE_LIST, 45 ACTIVE_LIST,
46 FLUSHING_LIST,
47 INACTIVE_LIST, 46 INACTIVE_LIST,
48 PINNED_LIST, 47 PINNED_LIST,
49}; 48};
@@ -61,28 +60,11 @@ static int i915_capabilities(struct seq_file *m, void *data)
61 60
62 seq_printf(m, "gen: %d\n", info->gen); 61 seq_printf(m, "gen: %d\n", info->gen);
63 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev)); 62 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
64#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 63#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
65 B(is_mobile); 64#define DEV_INFO_SEP ;
66 B(is_i85x); 65 DEV_INFO_FLAGS;
67 B(is_i915g); 66#undef DEV_INFO_FLAG
68 B(is_i945gm); 67#undef DEV_INFO_SEP
69 B(is_g33);
70 B(need_gfx_hws);
71 B(is_g4x);
72 B(is_pineview);
73 B(is_broadwater);
74 B(is_crestline);
75 B(has_fbc);
76 B(has_pipe_cxsr);
77 B(has_hotplug);
78 B(cursor_needs_physical);
79 B(has_overlay);
80 B(overlay_needs_physical);
81 B(supports_tv);
82 B(has_bsd_ring);
83 B(has_blt_ring);
84 B(has_llc);
85#undef B
86 68
87 return 0; 69 return 0;
88} 70}
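The long run of B(...) lines becomes an X-macro: i915_drv.h now defines the flag list once as DEV_INFO_FLAGS, and each consumer supplies its own expansion of DEV_INFO_FLAG and DEV_INFO_SEP. A reduced sketch of the pattern with an abbreviated flag list (the real list names every capability bit):

	#define DEV_INFO_FLAGS \
		DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
		DEV_INFO_FLAG(has_llc)

	/* the debugfs expansion above prints "flag: yes/no" per entry;
	 * another user can expand the same list into, say, a struct
	 * initializer without duplicating the flag names */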
@@ -120,20 +102,23 @@ static const char *cache_level_str(int type)
120static void 102static void
121describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 103describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
122{ 104{
123 seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s", 105 seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s",
124 &obj->base, 106 &obj->base,
125 get_pin_flag(obj), 107 get_pin_flag(obj),
126 get_tiling_flag(obj), 108 get_tiling_flag(obj),
127 obj->base.size / 1024, 109 obj->base.size / 1024,
128 obj->base.read_domains, 110 obj->base.read_domains,
129 obj->base.write_domain, 111 obj->base.write_domain,
130 obj->last_rendering_seqno, 112 obj->last_read_seqno,
113 obj->last_write_seqno,
131 obj->last_fenced_seqno, 114 obj->last_fenced_seqno,
132 cache_level_str(obj->cache_level), 115 cache_level_str(obj->cache_level),
133 obj->dirty ? " dirty" : "", 116 obj->dirty ? " dirty" : "",
134 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 117 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
135 if (obj->base.name) 118 if (obj->base.name)
136 seq_printf(m, " (name: %d)", obj->base.name); 119 seq_printf(m, " (name: %d)", obj->base.name);
120 if (obj->pin_count)
121 seq_printf(m, " (pinned x %d)", obj->pin_count);
137 if (obj->fence_reg != I915_FENCE_REG_NONE) 122 if (obj->fence_reg != I915_FENCE_REG_NONE)
138 seq_printf(m, " (fence: %d)", obj->fence_reg); 123 seq_printf(m, " (fence: %d)", obj->fence_reg);
139 if (obj->gtt_space != NULL) 124 if (obj->gtt_space != NULL)
@@ -176,10 +161,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
176 seq_printf(m, "Inactive:\n"); 161 seq_printf(m, "Inactive:\n");
177 head = &dev_priv->mm.inactive_list; 162 head = &dev_priv->mm.inactive_list;
178 break; 163 break;
179 case FLUSHING_LIST:
180 seq_printf(m, "Flushing:\n");
181 head = &dev_priv->mm.flushing_list;
182 break;
183 default: 164 default:
184 mutex_unlock(&dev->struct_mutex); 165 mutex_unlock(&dev->struct_mutex);
185 return -EINVAL; 166 return -EINVAL;
@@ -217,8 +198,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
217 struct drm_info_node *node = (struct drm_info_node *) m->private; 198 struct drm_info_node *node = (struct drm_info_node *) m->private;
218 struct drm_device *dev = node->minor->dev; 199 struct drm_device *dev = node->minor->dev;
219 struct drm_i915_private *dev_priv = dev->dev_private; 200 struct drm_i915_private *dev_priv = dev->dev_private;
220 u32 count, mappable_count; 201 u32 count, mappable_count, purgeable_count;
221 size_t size, mappable_size; 202 size_t size, mappable_size, purgeable_size;
222 struct drm_i915_gem_object *obj; 203 struct drm_i915_gem_object *obj;
223 int ret; 204 int ret;
224 205
@@ -231,13 +212,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
231 dev_priv->mm.object_memory); 212 dev_priv->mm.object_memory);
232 213
233 size = count = mappable_size = mappable_count = 0; 214 size = count = mappable_size = mappable_count = 0;
234 count_objects(&dev_priv->mm.gtt_list, gtt_list); 215 count_objects(&dev_priv->mm.bound_list, gtt_list);
235 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", 216 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
236 count, mappable_count, size, mappable_size); 217 count, mappable_count, size, mappable_size);
237 218
238 size = count = mappable_size = mappable_count = 0; 219 size = count = mappable_size = mappable_count = 0;
239 count_objects(&dev_priv->mm.active_list, mm_list); 220 count_objects(&dev_priv->mm.active_list, mm_list);
240 count_objects(&dev_priv->mm.flushing_list, mm_list);
241 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", 221 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
242 count, mappable_count, size, mappable_size); 222 count, mappable_count, size, mappable_size);
243 223
@@ -246,8 +226,16 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
246 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", 226 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
247 count, mappable_count, size, mappable_size); 227 count, mappable_count, size, mappable_size);
248 228
229 size = count = purgeable_size = purgeable_count = 0;
230 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) {
231 size += obj->base.size, ++count;
232 if (obj->madv == I915_MADV_DONTNEED)
233 purgeable_size += obj->base.size, ++purgeable_count;
234 }
235 seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
236
249 size = count = mappable_size = mappable_count = 0; 237 size = count = mappable_size = mappable_count = 0;
250 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 238 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
251 if (obj->fault_mappable) { 239 if (obj->fault_mappable) {
252 size += obj->gtt_space->size; 240 size += obj->gtt_space->size;
253 ++count; 241 ++count;
@@ -256,7 +244,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
256 mappable_size += obj->gtt_space->size; 244 mappable_size += obj->gtt_space->size;
257 ++mappable_count; 245 ++mappable_count;
258 } 246 }
247 if (obj->madv == I915_MADV_DONTNEED) {
248 purgeable_size += obj->base.size;
249 ++purgeable_count;
250 }
259 } 251 }
252 seq_printf(m, "%u purgeable objects, %zu bytes\n",
253 purgeable_count, purgeable_size);
260 seq_printf(m, "%u pinned mappable objects, %zu bytes\n", 254 seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
261 mappable_count, mappable_size); 255 mappable_count, mappable_size);
262 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 256 seq_printf(m, "%u fault mappable objects, %zu bytes\n",
@@ -285,7 +279,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
285 return ret; 279 return ret;
286 280
287 total_obj_size = total_gtt_size = count = 0; 281 total_obj_size = total_gtt_size = count = 0;
288 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 282 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
289 if (list == PINNED_LIST && obj->pin_count == 0) 283 if (list == PINNED_LIST && obj->pin_count == 0)
290 continue; 284 continue;
291 285
@@ -358,40 +352,22 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
358 struct drm_info_node *node = (struct drm_info_node *) m->private; 352 struct drm_info_node *node = (struct drm_info_node *) m->private;
359 struct drm_device *dev = node->minor->dev; 353 struct drm_device *dev = node->minor->dev;
360 drm_i915_private_t *dev_priv = dev->dev_private; 354 drm_i915_private_t *dev_priv = dev->dev_private;
355 struct intel_ring_buffer *ring;
361 struct drm_i915_gem_request *gem_request; 356 struct drm_i915_gem_request *gem_request;
362 int ret, count; 357 int ret, count, i;
363 358
364 ret = mutex_lock_interruptible(&dev->struct_mutex); 359 ret = mutex_lock_interruptible(&dev->struct_mutex);
365 if (ret) 360 if (ret)
366 return ret; 361 return ret;
367 362
368 count = 0; 363 count = 0;
369 if (!list_empty(&dev_priv->ring[RCS].request_list)) { 364 for_each_ring(ring, dev_priv, i) {
370 seq_printf(m, "Render requests:\n"); 365 if (list_empty(&ring->request_list))
371 list_for_each_entry(gem_request, 366 continue;
372 &dev_priv->ring[RCS].request_list, 367
373 list) { 368 seq_printf(m, "%s requests:\n", ring->name);
374 seq_printf(m, " %d @ %d\n",
375 gem_request->seqno,
376 (int) (jiffies - gem_request->emitted_jiffies));
377 }
378 count++;
379 }
380 if (!list_empty(&dev_priv->ring[VCS].request_list)) {
381 seq_printf(m, "BSD requests:\n");
382 list_for_each_entry(gem_request,
383 &dev_priv->ring[VCS].request_list,
384 list) {
385 seq_printf(m, " %d @ %d\n",
386 gem_request->seqno,
387 (int) (jiffies - gem_request->emitted_jiffies));
388 }
389 count++;
390 }
391 if (!list_empty(&dev_priv->ring[BCS].request_list)) {
392 seq_printf(m, "BLT requests:\n");
393 list_for_each_entry(gem_request, 369 list_for_each_entry(gem_request,
394 &dev_priv->ring[BCS].request_list, 370 &ring->request_list,
395 list) { 371 list) {
396 seq_printf(m, " %d @ %d\n", 372 seq_printf(m, " %d @ %d\n",
397 gem_request->seqno, 373 gem_request->seqno,
@@ -412,7 +388,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
412{ 388{
413 if (ring->get_seqno) { 389 if (ring->get_seqno) {
414 seq_printf(m, "Current sequence (%s): %d\n", 390 seq_printf(m, "Current sequence (%s): %d\n",
415 ring->name, ring->get_seqno(ring)); 391 ring->name, ring->get_seqno(ring, false));
416 } 392 }
417} 393}
418 394
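The copy-pasted per-ring blocks collapse into a loop over for_each_ring(). Its definition (quoted from memory from i915_drv.h of this era, so treat as a sketch) skips rings the device does not actually have:

	#define for_each_ring(ring__, dev_priv__, i__)			\
		for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++)	\
			if (((ring__) = &(dev_priv__)->ring[(i__)]),	\
			    intel_ring_initialized((ring__)))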
@@ -421,14 +397,15 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
421 struct drm_info_node *node = (struct drm_info_node *) m->private; 397 struct drm_info_node *node = (struct drm_info_node *) m->private;
422 struct drm_device *dev = node->minor->dev; 398 struct drm_device *dev = node->minor->dev;
423 drm_i915_private_t *dev_priv = dev->dev_private; 399 drm_i915_private_t *dev_priv = dev->dev_private;
400 struct intel_ring_buffer *ring;
424 int ret, i; 401 int ret, i;
425 402
426 ret = mutex_lock_interruptible(&dev->struct_mutex); 403 ret = mutex_lock_interruptible(&dev->struct_mutex);
427 if (ret) 404 if (ret)
428 return ret; 405 return ret;
429 406
430 for (i = 0; i < I915_NUM_RINGS; i++) 407 for_each_ring(ring, dev_priv, i)
431 i915_ring_seqno_info(m, &dev_priv->ring[i]); 408 i915_ring_seqno_info(m, ring);
432 409
433 mutex_unlock(&dev->struct_mutex); 410 mutex_unlock(&dev->struct_mutex);
434 411
@@ -441,6 +418,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
441 struct drm_info_node *node = (struct drm_info_node *) m->private; 418 struct drm_info_node *node = (struct drm_info_node *) m->private;
442 struct drm_device *dev = node->minor->dev; 419 struct drm_device *dev = node->minor->dev;
443 drm_i915_private_t *dev_priv = dev->dev_private; 420 drm_i915_private_t *dev_priv = dev->dev_private;
421 struct intel_ring_buffer *ring;
444 int ret, i, pipe; 422 int ret, i, pipe;
445 423
446 ret = mutex_lock_interruptible(&dev->struct_mutex); 424 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -518,13 +496,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
518 } 496 }
519 seq_printf(m, "Interrupts received: %d\n", 497 seq_printf(m, "Interrupts received: %d\n",
520 atomic_read(&dev_priv->irq_received)); 498 atomic_read(&dev_priv->irq_received));
521 for (i = 0; i < I915_NUM_RINGS; i++) { 499 for_each_ring(ring, dev_priv, i) {
522 if (IS_GEN6(dev) || IS_GEN7(dev)) { 500 if (IS_GEN6(dev) || IS_GEN7(dev)) {
523 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", 501 seq_printf(m,
524 dev_priv->ring[i].name, 502 "Graphics Interrupt mask (%s): %08x\n",
525 I915_READ_IMR(&dev_priv->ring[i])); 503 ring->name, I915_READ_IMR(ring));
526 } 504 }
527 i915_ring_seqno_info(m, &dev_priv->ring[i]); 505 i915_ring_seqno_info(m, ring);
528 } 506 }
529 mutex_unlock(&dev->struct_mutex); 507 mutex_unlock(&dev->struct_mutex);
530 508
@@ -547,7 +525,8 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
547 for (i = 0; i < dev_priv->num_fence_regs; i++) { 525 for (i = 0; i < dev_priv->num_fence_regs; i++) {
548 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; 526 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
549 527
550 seq_printf(m, "Fenced object[%2d] = ", i); 528 seq_printf(m, "Fence %d, pin count = %d, object = ",
529 i, dev_priv->fence_regs[i].pin_count);
551 if (obj == NULL) 530 if (obj == NULL)
552 seq_printf(m, "unused"); 531 seq_printf(m, "unused");
553 else 532 else
@@ -629,12 +608,12 @@ static void print_error_buffers(struct seq_file *m,
629 seq_printf(m, "%s [%d]:\n", name, count); 608 seq_printf(m, "%s [%d]:\n", name, count);
630 609
631 while (count--) { 610 while (count--) {
632 seq_printf(m, " %08x %8u %04x %04x %08x%s%s%s%s%s%s%s", 611 seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s",
633 err->gtt_offset, 612 err->gtt_offset,
634 err->size, 613 err->size,
635 err->read_domains, 614 err->read_domains,
636 err->write_domain, 615 err->write_domain,
637 err->seqno, 616 err->rseqno, err->wseqno,
638 pin_flag(err->pinned), 617 pin_flag(err->pinned),
639 tiling_flag(err->tiling), 618 tiling_flag(err->tiling),
640 dirty_flag(err->dirty), 619 dirty_flag(err->dirty),
@@ -666,10 +645,9 @@ static void i915_ring_error_state(struct seq_file *m,
666 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 645 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
667 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); 646 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
668 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 647 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
669 if (ring == RCS && INTEL_INFO(dev)->gen >= 4) { 648 if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
670 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
671 seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); 649 seq_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
672 } 650
673 if (INTEL_INFO(dev)->gen >= 4) 651 if (INTEL_INFO(dev)->gen >= 4)
674 seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 652 seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
675 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 653 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
@@ -718,11 +696,17 @@ static int i915_error_state(struct seq_file *m, void *unused)
718 for (i = 0; i < dev_priv->num_fence_regs; i++) 696 for (i = 0; i < dev_priv->num_fence_regs; i++)
719 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 697 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
720 698
699 for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
700 seq_printf(m, " INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);
701
721 if (INTEL_INFO(dev)->gen >= 6) { 702 if (INTEL_INFO(dev)->gen >= 6) {
722 seq_printf(m, "ERROR: 0x%08x\n", error->error); 703 seq_printf(m, "ERROR: 0x%08x\n", error->error);
723 seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 704 seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
724 } 705 }
725 706
707 if (INTEL_INFO(dev)->gen == 7)
708 seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
709
726 for_each_ring(ring, dev_priv, i) 710 for_each_ring(ring, dev_priv, i)
727 i915_ring_error_state(m, dev, error, i); 711 i915_ring_error_state(m, dev, error, i);
728 712
@@ -798,10 +782,14 @@ i915_error_state_write(struct file *filp,
798 struct seq_file *m = filp->private_data; 782 struct seq_file *m = filp->private_data;
799 struct i915_error_state_file_priv *error_priv = m->private; 783 struct i915_error_state_file_priv *error_priv = m->private;
800 struct drm_device *dev = error_priv->dev; 784 struct drm_device *dev = error_priv->dev;
785 int ret;
801 786
802 DRM_DEBUG_DRIVER("Resetting error state\n"); 787 DRM_DEBUG_DRIVER("Resetting error state\n");
803 788
804 mutex_lock(&dev->struct_mutex); 789 ret = mutex_lock_interruptible(&dev->struct_mutex);
790 if (ret)
791 return ret;
792
805 i915_destroy_error_state(dev); 793 i915_destroy_error_state(dev);
806 mutex_unlock(&dev->struct_mutex); 794 mutex_unlock(&dev->struct_mutex);
807 795
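
A note on the pattern above, which recurs throughout this series: debugfs entry points are converted from mutex_lock() to mutex_lock_interruptible(), so a process blocked behind a wedged struct_mutex can still be killed instead of hanging unkillably. A minimal sketch of the shape (mutex_lock_interruptible() returns 0 on success or -EINTR if a signal arrived while waiting):

static ssize_t
example_debugfs_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	int ret;

	/* Sleep until struct_mutex is free, but bail out on a signal
	 * instead of hanging forever on a wedged driver. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;	/* -EINTR propagates back to the writer */

	/* ... touch state protected by struct_mutex ... */

	mutex_unlock(&dev->struct_mutex);
	return cnt;
}
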
@@ -925,7 +913,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
925 seq_printf(m, "Render p-state limit: %d\n", 913 seq_printf(m, "Render p-state limit: %d\n",
926 rp_state_limits & 0xff); 914 rp_state_limits & 0xff);
927 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> 915 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
928 GEN6_CAGF_SHIFT) * 50); 916 GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
929 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 917 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
930 GEN6_CURICONT_MASK); 918 GEN6_CURICONT_MASK);
931 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 919 seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -941,15 +929,15 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
941 929
942 max_freq = (rp_state_cap & 0xff0000) >> 16; 930 max_freq = (rp_state_cap & 0xff0000) >> 16;
943 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 931 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
944 max_freq * 50); 932 max_freq * GT_FREQUENCY_MULTIPLIER);
945 933
946 max_freq = (rp_state_cap & 0xff00) >> 8; 934 max_freq = (rp_state_cap & 0xff00) >> 8;
947 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 935 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
948 max_freq * 50); 936 max_freq * GT_FREQUENCY_MULTIPLIER);
949 937
950 max_freq = rp_state_cap & 0xff; 938 max_freq = rp_state_cap & 0xff;
951 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 939 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
952 max_freq * 50); 940 max_freq * GT_FREQUENCY_MULTIPLIER);
953 } else { 941 } else {
954 seq_printf(m, "no P-state info available\n"); 942 seq_printf(m, "no P-state info available\n");
955 } 943 }
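
The bare multiplications by 50 above are replaced with GT_FREQUENCY_MULTIPLIER, which this series adds to i915_drv.h (see the hunk further down defining it as 50): gen6+ RPS delay values are encoded in 50 MHz steps. A sketch of the two conversions the debugfs code performs, assuming that definition:

/* RPS "delay" values on gen6/gen7 count in 50 MHz steps. */
#define GT_FREQUENCY_MULTIPLIER 50

static inline int rps_delay_to_mhz(u8 delay)
{
	return delay * GT_FREQUENCY_MULTIPLIER;	/* e.g. 22 -> 1100 MHz */
}

static inline u8 mhz_to_rps_delay(int mhz)
{
	return mhz / GT_FREQUENCY_MULTIPLIER;	/* truncates to a 50 MHz step */
}
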
@@ -1291,7 +1279,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1291 1279
1292 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); 1280 seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
1293 1281
1294 for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay; 1282 for (gpu_freq = dev_priv->rps.min_delay;
1283 gpu_freq <= dev_priv->rps.max_delay;
1295 gpu_freq++) { 1284 gpu_freq++) {
1296 I915_WRITE(GEN6_PCODE_DATA, gpu_freq); 1285 I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
1297 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | 1286 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
@@ -1302,7 +1291,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1302 continue; 1291 continue;
1303 } 1292 }
1304 ia_freq = I915_READ(GEN6_PCODE_DATA); 1293 ia_freq = I915_READ(GEN6_PCODE_DATA);
1305 seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100); 1294 seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
1306 } 1295 }
1307 1296
1308 mutex_unlock(&dev->struct_mutex); 1297 mutex_unlock(&dev->struct_mutex);
@@ -1471,8 +1460,12 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
1471 struct drm_info_node *node = (struct drm_info_node *) m->private; 1460 struct drm_info_node *node = (struct drm_info_node *) m->private;
1472 struct drm_device *dev = node->minor->dev; 1461 struct drm_device *dev = node->minor->dev;
1473 struct drm_i915_private *dev_priv = dev->dev_private; 1462 struct drm_i915_private *dev_priv = dev->dev_private;
1463 int ret;
1464
1465 ret = mutex_lock_interruptible(&dev->struct_mutex);
1466 if (ret)
1467 return ret;
1474 1468
1475 mutex_lock(&dev->struct_mutex);
1476 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 1469 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1477 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1470 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1478 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1471 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
@@ -1519,9 +1512,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
1519 if (INTEL_INFO(dev)->gen == 6) 1512 if (INTEL_INFO(dev)->gen == 6)
1520 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 1513 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
1521 1514
1522 for (i = 0; i < I915_NUM_RINGS; i++) { 1515 for_each_ring(ring, dev_priv, i) {
1523 ring = &dev_priv->ring[i];
1524
1525 seq_printf(m, "%s\n", ring->name); 1516 seq_printf(m, "%s\n", ring->name);
1526 if (INTEL_INFO(dev)->gen == 7) 1517 if (INTEL_INFO(dev)->gen == 7)
1527 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); 1518 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
@@ -1673,7 +1664,7 @@ i915_ring_stop_write(struct file *filp,
1673 struct drm_device *dev = filp->private_data; 1664 struct drm_device *dev = filp->private_data;
1674 struct drm_i915_private *dev_priv = dev->dev_private; 1665 struct drm_i915_private *dev_priv = dev->dev_private;
1675 char buf[20]; 1666 char buf[20];
1676 int val = 0; 1667 int val = 0, ret;
1677 1668
1678 if (cnt > 0) { 1669 if (cnt > 0) {
1679 if (cnt > sizeof(buf) - 1) 1670 if (cnt > sizeof(buf) - 1)
@@ -1688,7 +1679,10 @@ i915_ring_stop_write(struct file *filp,
1688 1679
1689 DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val); 1680 DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
1690 1681
1691 mutex_lock(&dev->struct_mutex); 1682 ret = mutex_lock_interruptible(&dev->struct_mutex);
1683 if (ret)
1684 return ret;
1685
1692 dev_priv->stop_rings = val; 1686 dev_priv->stop_rings = val;
1693 mutex_unlock(&dev->struct_mutex); 1687 mutex_unlock(&dev->struct_mutex);
1694 1688
@@ -1712,10 +1706,18 @@ i915_max_freq_read(struct file *filp,
1712 struct drm_device *dev = filp->private_data; 1706 struct drm_device *dev = filp->private_data;
1713 drm_i915_private_t *dev_priv = dev->dev_private; 1707 drm_i915_private_t *dev_priv = dev->dev_private;
1714 char buf[80]; 1708 char buf[80];
1715 int len; 1709 int len, ret;
1710
1711 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1712 return -ENODEV;
1713
1714 ret = mutex_lock_interruptible(&dev->struct_mutex);
1715 if (ret)
1716 return ret;
1716 1717
1717 len = snprintf(buf, sizeof(buf), 1718 len = snprintf(buf, sizeof(buf),
1718 "max freq: %d\n", dev_priv->max_delay * 50); 1719 "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
1720 mutex_unlock(&dev->struct_mutex);
1719 1721
1720 if (len > sizeof(buf)) 1722 if (len > sizeof(buf))
1721 len = sizeof(buf); 1723 len = sizeof(buf);
@@ -1732,7 +1734,10 @@ i915_max_freq_write(struct file *filp,
1732 struct drm_device *dev = filp->private_data; 1734 struct drm_device *dev = filp->private_data;
1733 struct drm_i915_private *dev_priv = dev->dev_private; 1735 struct drm_i915_private *dev_priv = dev->dev_private;
1734 char buf[20]; 1736 char buf[20];
1735 int val = 1; 1737 int val = 1, ret;
1738
1739 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1740 return -ENODEV;
1736 1741
1737 if (cnt > 0) { 1742 if (cnt > 0) {
1738 if (cnt > sizeof(buf) - 1) 1743 if (cnt > sizeof(buf) - 1)
@@ -1747,12 +1752,17 @@ i915_max_freq_write(struct file *filp,
1747 1752
1748 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); 1753 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
1749 1754
1755 ret = mutex_lock_interruptible(&dev->struct_mutex);
1756 if (ret)
1757 return ret;
1758
1750 /* 1759 /*
1751 * Turbo will still be enabled, but won't go above the set value. 1760 * Turbo will still be enabled, but won't go above the set value.
1752 */ 1761 */
1753 dev_priv->max_delay = val / 50; 1762 dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
1754 1763
1755 gen6_set_rps(dev, val / 50); 1764 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
1765 mutex_unlock(&dev->struct_mutex);
1756 1766
1757 return cnt; 1767 return cnt;
1758} 1768}
@@ -1772,10 +1782,18 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
1772 struct drm_device *dev = filp->private_data; 1782 struct drm_device *dev = filp->private_data;
1773 drm_i915_private_t *dev_priv = dev->dev_private; 1783 drm_i915_private_t *dev_priv = dev->dev_private;
1774 char buf[80]; 1784 char buf[80];
1775 int len; 1785 int len, ret;
1786
1787 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1788 return -ENODEV;
1789
1790 ret = mutex_lock_interruptible(&dev->struct_mutex);
1791 if (ret)
1792 return ret;
1776 1793
1777 len = snprintf(buf, sizeof(buf), 1794 len = snprintf(buf, sizeof(buf),
1778 "min freq: %d\n", dev_priv->min_delay * 50); 1795 "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
1796 mutex_unlock(&dev->struct_mutex);
1779 1797
1780 if (len > sizeof(buf)) 1798 if (len > sizeof(buf))
1781 len = sizeof(buf); 1799 len = sizeof(buf);
@@ -1790,7 +1808,10 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
1790 struct drm_device *dev = filp->private_data; 1808 struct drm_device *dev = filp->private_data;
1791 struct drm_i915_private *dev_priv = dev->dev_private; 1809 struct drm_i915_private *dev_priv = dev->dev_private;
1792 char buf[20]; 1810 char buf[20];
1793 int val = 1; 1811 int val = 1, ret;
1812
1813 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1814 return -ENODEV;
1794 1815
1795 if (cnt > 0) { 1816 if (cnt > 0) {
1796 if (cnt > sizeof(buf) - 1) 1817 if (cnt > sizeof(buf) - 1)
@@ -1805,12 +1826,17 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
1805 1826
1806 DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); 1827 DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
1807 1828
1829 ret = mutex_lock_interruptible(&dev->struct_mutex);
1830 if (ret)
1831 return ret;
1832
1808 /* 1833 /*
1809 * Turbo will still be enabled, but won't go below the set value. 1834 * Turbo will still be enabled, but won't go below the set value.
1810 */ 1835 */
1811 dev_priv->min_delay = val / 50; 1836 dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
1812 1837
1813 gen6_set_rps(dev, val / 50); 1838 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
1839 mutex_unlock(&dev->struct_mutex);
1814 1840
1815 return cnt; 1841 return cnt;
1816} 1842}
@@ -1833,9 +1859,15 @@ i915_cache_sharing_read(struct file *filp,
1833 drm_i915_private_t *dev_priv = dev->dev_private; 1859 drm_i915_private_t *dev_priv = dev->dev_private;
1834 char buf[80]; 1860 char buf[80];
1835 u32 snpcr; 1861 u32 snpcr;
1836 int len; 1862 int len, ret;
1863
1864 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1865 return -ENODEV;
1866
1867 ret = mutex_lock_interruptible(&dev->struct_mutex);
1868 if (ret)
1869 return ret;
1837 1870
1838 mutex_lock(&dev_priv->dev->struct_mutex);
1839 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1871 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
1840 mutex_unlock(&dev_priv->dev->struct_mutex); 1872 mutex_unlock(&dev_priv->dev->struct_mutex);
1841 1873
@@ -1861,6 +1893,9 @@ i915_cache_sharing_write(struct file *filp,
1861 u32 snpcr; 1893 u32 snpcr;
1862 int val = 1; 1894 int val = 1;
1863 1895
1896 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1897 return -ENODEV;
1898
1864 if (cnt > 0) { 1899 if (cnt > 0) {
1865 if (cnt > sizeof(buf) - 1) 1900 if (cnt > sizeof(buf) - 1)
1866 return -EINVAL; 1901 return -EINVAL;
@@ -1924,16 +1959,11 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
1924{ 1959{
1925 struct drm_device *dev = inode->i_private; 1960 struct drm_device *dev = inode->i_private;
1926 struct drm_i915_private *dev_priv = dev->dev_private; 1961 struct drm_i915_private *dev_priv = dev->dev_private;
1927 int ret;
1928 1962
1929 if (INTEL_INFO(dev)->gen < 6) 1963 if (INTEL_INFO(dev)->gen < 6)
1930 return 0; 1964 return 0;
1931 1965
1932 ret = mutex_lock_interruptible(&dev->struct_mutex);
1933 if (ret)
1934 return ret;
1935 gen6_gt_force_wake_get(dev_priv); 1966 gen6_gt_force_wake_get(dev_priv);
1936 mutex_unlock(&dev->struct_mutex);
1937 1967
1938 return 0; 1968 return 0;
1939} 1969}
@@ -1946,16 +1976,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
1946 if (INTEL_INFO(dev)->gen < 6) 1976 if (INTEL_INFO(dev)->gen < 6)
1947 return 0; 1977 return 0;
1948 1978
1949 /*
1950 * It's bad that we can potentially hang userspace if struct_mutex gets
1951 * forever stuck. However, if we cannot acquire this lock it means that
1952 * almost certainly the driver has hung and is not unload-able. Therefore
1953 * hanging here is probably a minor inconvenience not to be seen by
1954 * almost any user.
1955 */
1956 mutex_lock(&dev->struct_mutex);
1957 gen6_gt_force_wake_put(dev_priv); 1979 gen6_gt_force_wake_put(dev_priv);
1958 mutex_unlock(&dev->struct_mutex);
1959 1980
1960 return 0; 1981 return 0;
1961} 1982}
@@ -2005,7 +2026,6 @@ static struct drm_info_list i915_debugfs_list[] = {
2005 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 2026 {"i915_gem_gtt", i915_gem_gtt_info, 0},
2006 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, 2027 {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
2007 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, 2028 {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
2008 {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
2009 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, 2029 {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
2010 {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, 2030 {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
2011 {"i915_gem_request", i915_gem_request_info, 0}, 2031 {"i915_gem_request", i915_gem_request_info, 0},
@@ -2066,6 +2086,7 @@ int i915_debugfs_init(struct drm_minor *minor)
2066 &i915_cache_sharing_fops); 2086 &i915_cache_sharing_fops);
2067 if (ret) 2087 if (ret)
2068 return ret; 2088 return ret;
2089
2069 ret = i915_debugfs_create(minor->debugfs_root, minor, 2090 ret = i915_debugfs_create(minor->debugfs_root, minor,
2070 "i915_ring_stop", 2091 "i915_ring_stop",
2071 &i915_ring_stop_fops); 2092 &i915_ring_stop_fops);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 804f1c98e279..c9bfd83dde64 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -234,10 +234,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
234 } 234 }
235 } 235 }
236 236
237 dev_priv->cpp = init->cpp; 237 dev_priv->dri1.cpp = init->cpp;
238 dev_priv->back_offset = init->back_offset; 238 dev_priv->dri1.back_offset = init->back_offset;
239 dev_priv->front_offset = init->front_offset; 239 dev_priv->dri1.front_offset = init->front_offset;
240 dev_priv->current_page = 0; 240 dev_priv->dri1.current_page = 0;
241 if (master_priv->sarea_priv) 241 if (master_priv->sarea_priv)
242 master_priv->sarea_priv->pf_current_page = 0; 242 master_priv->sarea_priv->pf_current_page = 0;
243 243
@@ -574,7 +574,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
574 574
575 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n", 575 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
576 __func__, 576 __func__,
577 dev_priv->current_page, 577 dev_priv->dri1.current_page,
578 master_priv->sarea_priv->pf_current_page); 578 master_priv->sarea_priv->pf_current_page);
579 579
580 i915_kernel_lost_context(dev); 580 i915_kernel_lost_context(dev);
@@ -588,12 +588,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
588 588
589 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); 589 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
590 OUT_RING(0); 590 OUT_RING(0);
591 if (dev_priv->current_page == 0) { 591 if (dev_priv->dri1.current_page == 0) {
592 OUT_RING(dev_priv->back_offset); 592 OUT_RING(dev_priv->dri1.back_offset);
593 dev_priv->current_page = 1; 593 dev_priv->dri1.current_page = 1;
594 } else { 594 } else {
595 OUT_RING(dev_priv->front_offset); 595 OUT_RING(dev_priv->dri1.front_offset);
596 dev_priv->current_page = 0; 596 dev_priv->dri1.current_page = 0;
597 } 597 }
598 OUT_RING(0); 598 OUT_RING(0);
599 599
@@ -612,7 +612,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
612 ADVANCE_LP_RING(); 612 ADVANCE_LP_RING();
613 } 613 }
614 614
615 master_priv->sarea_priv->pf_current_page = dev_priv->current_page; 615 master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
616 return 0; 616 return 0;
617} 617}
618 618
@@ -1008,6 +1008,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
1008 case I915_PARAM_HAS_WAIT_TIMEOUT: 1008 case I915_PARAM_HAS_WAIT_TIMEOUT:
1009 value = 1; 1009 value = 1;
1010 break; 1010 break;
1011 case I915_PARAM_HAS_SEMAPHORES:
1012 value = i915_semaphore_is_enabled(dev);
1013 break;
1014 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
1015 value = 1;
1016 break;
1011 default: 1017 default:
1012 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 1018 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
1013 param->param); 1019 param->param);
@@ -1424,6 +1430,21 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1424 kfree(ap); 1430 kfree(ap);
1425} 1431}
1426 1432
1433static void i915_dump_device_info(struct drm_i915_private *dev_priv)
1434{
1435 const struct intel_device_info *info = dev_priv->info;
1436
1437#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
1438#define DEV_INFO_SEP ,
1439 DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
1440 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
1441 info->gen,
1442 dev_priv->dev->pdev->device,
1443 DEV_INFO_FLAGS);
1444#undef DEV_INFO_FLAG
1445#undef DEV_INFO_SEP
1446}
1447
1427/** 1448/**
1428 * i915_driver_load - setup chip and create an initial config 1449 * i915_driver_load - setup chip and create an initial config
1429 * @dev: DRM device 1450 * @dev: DRM device
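
i915_dump_device_info() leans on an X-macro: DEV_INFO_FLAGS (added to i915_drv.h later in this diff) is a single list of flag names, and each expansion site redefines DEV_INFO_FLAG and DEV_INFO_SEP to get a different rendering of the same list. A toy standalone sketch of the technique, with made-up flags:

#include <stdio.h>

/* One list, many expansions: redefine ITEM/SEP before each use. */
#define FLAG_LIST \
	ITEM(is_mobile) SEP \
	ITEM(has_llc)   SEP \
	ITEM(has_fbc)

struct info { int is_mobile, has_llc, has_fbc; };

static void dump(const struct info *info)
{
#define ITEM(name) (info->name ? #name "," : "")
#define SEP ,
	/* ITEM/SEP are expanded here, when FLAG_LIST is used. */
	printf("flags=%s%s%s\n", FLAG_LIST);
#undef ITEM
#undef SEP
}

int main(void)
{
	struct info i = { .is_mobile = 1, .has_llc = 1 };
	dump(&i);	/* prints: flags=is_mobile,has_llc, */
	return 0;
}
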
@@ -1439,7 +1460,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1439{ 1460{
1440 struct drm_i915_private *dev_priv; 1461 struct drm_i915_private *dev_priv;
1441 struct intel_device_info *info; 1462 struct intel_device_info *info;
1442 int ret = 0, mmio_bar; 1463 int ret = 0, mmio_bar, mmio_size;
1443 uint32_t aperture_size; 1464 uint32_t aperture_size;
1444 1465
1445 info = (struct intel_device_info *) flags; 1466 info = (struct intel_device_info *) flags;
@@ -1448,7 +1469,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1448 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) 1469 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
1449 return -ENODEV; 1470 return -ENODEV;
1450 1471
1451
1452 /* i915 has 4 more counters */ 1472 /* i915 has 4 more counters */
1453 dev->counters += 4; 1473 dev->counters += 4;
1454 dev->types[6] = _DRM_STAT_IRQ; 1474 dev->types[6] = _DRM_STAT_IRQ;
@@ -1464,6 +1484,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1464 dev_priv->dev = dev; 1484 dev_priv->dev = dev;
1465 dev_priv->info = info; 1485 dev_priv->info = info;
1466 1486
1487 i915_dump_device_info(dev_priv);
1488
1467 if (i915_get_bridge_dev(dev)) { 1489 if (i915_get_bridge_dev(dev)) {
1468 ret = -EIO; 1490 ret = -EIO;
1469 goto free_priv; 1491 goto free_priv;
@@ -1503,7 +1525,19 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1503 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); 1525 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1504 1526
1505 mmio_bar = IS_GEN2(dev) ? 1 : 0; 1527 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1506 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); 1528 /* Before gen4, the registers and the GTT are behind different BARs.
1529 * However, from gen4 onwards, the registers and the GTT are shared
1530 * in the same BAR, so we want to restrict this ioremap from
1531 * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately,
1532 * the register BAR remains the same size for all the earlier
1533 * generations up to Ironlake.
1534 */
1535 if (info->gen < 5)
1536 mmio_size = 512*1024;
1537 else
1538 mmio_size = 2*1024*1024;
1539
1540 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
1507 if (!dev_priv->regs) { 1541 if (!dev_priv->regs) {
1508 DRM_ERROR("failed to map registers\n"); 1542 DRM_ERROR("failed to map registers\n");
1509 ret = -EIO; 1543 ret = -EIO;
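
The key pci_iomap() semantics here: a maxlen of 0 maps the whole BAR, while a non-zero maxlen caps the mapping at min(maxlen, BAR size). Restricting the length keeps the uncached register mapping off the GTT half of the shared gen4+ BAR:

void __iomem *regs;

/* maxlen == 0: map the entire BAR (on gen4+ this includes the GTT). */
regs = pci_iomap(pdev, mmio_bar, 0);

/* non-zero maxlen: map at most that much, leaving the GTT range free
 * to be mapped separately with ioremap_wc(). */
regs = pci_iomap(pdev, mmio_bar, 512 * 1024);
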
@@ -1535,11 +1569,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1535 * 1569 *
1536 * All tasks on the workqueue are expected to acquire the dev mutex 1570 * All tasks on the workqueue are expected to acquire the dev mutex
1537 * so there is no point in running more than one instance of the 1571 * so there is no point in running more than one instance of the
1538 * workqueue at any time: max_active = 1 and NON_REENTRANT. 1572 * workqueue at any time. Use an ordered one.
1539 */ 1573 */
1540 dev_priv->wq = alloc_workqueue("i915", 1574 dev_priv->wq = alloc_ordered_workqueue("i915", 0);
1541 WQ_UNBOUND | WQ_NON_REENTRANT,
1542 1);
1543 if (dev_priv->wq == NULL) { 1575 if (dev_priv->wq == NULL) {
1544 DRM_ERROR("Failed to create our workqueue.\n"); 1576 DRM_ERROR("Failed to create our workqueue.\n");
1545 ret = -ENOMEM; 1577 ret = -ENOMEM;
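
For reference, alloc_ordered_workqueue() is a convenience wrapper: in the workqueue API of this era it creates an unbound workqueue with max_active forced to 1, so items execute one at a time in queueing order, the same semantics the removed flag combination spelled out by hand:

/* Before: single-threaded semantics assembled manually. */
wq = alloc_workqueue("i915", WQ_UNBOUND | WQ_NON_REENTRANT, 1);

/* After: identical ordering guarantees, stated directly. */
wq = alloc_ordered_workqueue("i915", 0);
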
@@ -1585,7 +1617,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1585 1617
1586 spin_lock_init(&dev_priv->irq_lock); 1618 spin_lock_init(&dev_priv->irq_lock);
1587 spin_lock_init(&dev_priv->error_lock); 1619 spin_lock_init(&dev_priv->error_lock);
1588 spin_lock_init(&dev_priv->rps_lock); 1620 spin_lock_init(&dev_priv->rps.lock);
1589 spin_lock_init(&dev_priv->dpio_lock); 1621 spin_lock_init(&dev_priv->dpio_lock);
1590 1622
1591 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 1623 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
@@ -1835,6 +1867,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
1835 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 1867 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1836 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 1868 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1837 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), 1869 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
1870 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
1871 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
1838 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), 1872 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
1839 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1873 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
1840 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1874 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -1857,6 +1891,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
1857 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), 1891 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
1858 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), 1892 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
1859 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), 1893 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
1894 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
1860}; 1895};
1861 1896
1862int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 1897int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f6825324e72d..aac4e5e1a5b9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -469,6 +469,9 @@ static int i915_drm_freeze(struct drm_device *dev)
469 "GEM idle failed, resume might fail\n"); 469 "GEM idle failed, resume might fail\n");
470 return error; 470 return error;
471 } 471 }
472
473 intel_modeset_disable(dev);
474
472 drm_irq_uninstall(dev); 475 drm_irq_uninstall(dev);
473 } 476 }
474 477
@@ -542,13 +545,9 @@ static int i915_drm_thaw(struct drm_device *dev)
542 mutex_unlock(&dev->struct_mutex); 545 mutex_unlock(&dev->struct_mutex);
543 546
544 intel_modeset_init_hw(dev); 547 intel_modeset_init_hw(dev);
548 intel_modeset_setup_hw_state(dev);
545 drm_mode_config_reset(dev); 549 drm_mode_config_reset(dev);
546 drm_irq_install(dev); 550 drm_irq_install(dev);
547
548 /* Resume the modeset for every activated CRTC */
549 mutex_lock(&dev->mode_config.mutex);
550 drm_helper_resume_force_mode(dev);
551 mutex_unlock(&dev->mode_config.mutex);
552 } 551 }
553 552
554 intel_opregion_init(dev); 553 intel_opregion_init(dev);
@@ -1059,7 +1058,7 @@ static bool IS_DISPLAYREG(u32 reg)
1059 * This should make it easier to transition modules over to the 1058 * This should make it easier to transition modules over to the
1060 * new register block scheme, since we can do it incrementally. 1059 * new register block scheme, since we can do it incrementally.
1061 */ 1060 */
1062 if (reg >= 0x180000) 1061 if (reg >= VLV_DISPLAY_BASE)
1063 return false; 1062 return false;
1064 1063
1065 if (reg >= RENDER_RING_BASE && 1064 if (reg >= RENDER_RING_BASE &&
@@ -1173,9 +1172,59 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1173 if (unlikely(__fifo_ret)) { \ 1172 if (unlikely(__fifo_ret)) { \
1174 gen6_gt_check_fifodbg(dev_priv); \ 1173 gen6_gt_check_fifodbg(dev_priv); \
1175 } \ 1174 } \
1175 if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
1176 DRM_ERROR("Unclaimed write to %x\n", reg); \
1177 writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT); \
1178 } \
1176} 1179}
1177__i915_write(8, b) 1180__i915_write(8, b)
1178__i915_write(16, w) 1181__i915_write(16, w)
1179__i915_write(32, l) 1182__i915_write(32, l)
1180__i915_write(64, q) 1183__i915_write(64, q)
1181#undef __i915_write 1184#undef __i915_write
1185
1186static const struct register_whitelist {
1187 uint64_t offset;
1188 uint32_t size;
1189 uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
1190} whitelist[] = {
1191 { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
1192};
1193
1194int i915_reg_read_ioctl(struct drm_device *dev,
1195 void *data, struct drm_file *file)
1196{
1197 struct drm_i915_private *dev_priv = dev->dev_private;
1198 struct drm_i915_reg_read *reg = data;
1199 struct register_whitelist const *entry = whitelist;
1200 int i;
1201
1202 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1203 if (entry->offset == reg->offset &&
1204 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1205 break;
1206 }
1207
1208 if (i == ARRAY_SIZE(whitelist))
1209 return -EINVAL;
1210
1211 switch (entry->size) {
1212 case 8:
1213 reg->val = I915_READ64(reg->offset);
1214 break;
1215 case 4:
1216 reg->val = I915_READ(reg->offset);
1217 break;
1218 case 2:
1219 reg->val = I915_READ16(reg->offset);
1220 break;
1221 case 1:
1222 reg->val = I915_READ8(reg->offset);
1223 break;
1224 default:
1225 WARN_ON(1);
1226 return -EINVAL;
1227 }
1228
1229 return 0;
1230}
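
The new ioctl is reachable from userspace through the i915 uapi header. A hedged sketch of a caller follows; the struct layout and request number are assumptions modelled on the i915_drm.h of this period, not taken from this diff, and only the whitelisted render-ring RING_TIMESTAMP register at 0x2358 will succeed on gen4 and later:

/* Hypothetical userspace caller for the new I915_REG_READ ioctl. */
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>

struct drm_i915_reg_read {
	uint64_t offset;	/* register offset, must be whitelisted */
	uint64_t val;		/* value read back by the kernel */
};

/* Assumed: DRM_COMMAND_BASE (0x40) + DRM_I915_REG_READ (0x31). */
#define DRM_IOCTL_I915_REG_READ \
	_IOWR('d', 0x40 + 0x31, struct drm_i915_reg_read)

int main(void)
{
	struct drm_i915_reg_read reg = { .offset = 0x2358 /* RING_TIMESTAMP */ };
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_REG_READ, &reg))
		perror("reg_read");
	else
		printf("render ring timestamp: 0x%llx\n",
		       (unsigned long long)reg.val);
	return 0;
}
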
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 627fe35781b4..4f2831aa5fed 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -109,6 +109,7 @@ struct intel_pch_pll {
109 109
110#define WATCH_COHERENCY 0 110#define WATCH_COHERENCY 0
111#define WATCH_LISTS 0 111#define WATCH_LISTS 0
112#define WATCH_GTT 0
112 113
113#define I915_GEM_PHYS_CURSOR_0 1 114#define I915_GEM_PHYS_CURSOR_0 1
114#define I915_GEM_PHYS_CURSOR_1 2 115#define I915_GEM_PHYS_CURSOR_1 2
@@ -195,9 +196,10 @@ struct drm_i915_error_state {
195 u32 cpu_ring_head[I915_NUM_RINGS]; 196 u32 cpu_ring_head[I915_NUM_RINGS];
196 u32 cpu_ring_tail[I915_NUM_RINGS]; 197 u32 cpu_ring_tail[I915_NUM_RINGS];
197 u32 error; /* gen6+ */ 198 u32 error; /* gen6+ */
199 u32 err_int; /* gen7 */
198 u32 instpm[I915_NUM_RINGS]; 200 u32 instpm[I915_NUM_RINGS];
199 u32 instps[I915_NUM_RINGS]; 201 u32 instps[I915_NUM_RINGS];
200 u32 instdone1; 202 u32 extra_instdone[I915_NUM_INSTDONE_REG];
201 u32 seqno[I915_NUM_RINGS]; 203 u32 seqno[I915_NUM_RINGS];
202 u64 bbaddr; 204 u64 bbaddr;
203 u32 fault_reg[I915_NUM_RINGS]; 205 u32 fault_reg[I915_NUM_RINGS];
@@ -221,7 +223,7 @@ struct drm_i915_error_state {
221 struct drm_i915_error_buffer { 223 struct drm_i915_error_buffer {
222 u32 size; 224 u32 size;
223 u32 name; 225 u32 name;
224 u32 seqno; 226 u32 rseqno, wseqno;
225 u32 gtt_offset; 227 u32 gtt_offset;
226 u32 read_domains; 228 u32 read_domains;
227 u32 write_domain; 229 u32 write_domain;
@@ -239,7 +241,6 @@ struct drm_i915_error_state {
239}; 241};
240 242
241struct drm_i915_display_funcs { 243struct drm_i915_display_funcs {
242 void (*dpms)(struct drm_crtc *crtc, int mode);
243 bool (*fbc_enabled)(struct drm_device *dev); 244 bool (*fbc_enabled)(struct drm_device *dev);
244 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); 245 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
245 void (*disable_fbc)(struct drm_device *dev); 246 void (*disable_fbc)(struct drm_device *dev);
@@ -248,7 +249,6 @@ struct drm_i915_display_funcs {
248 void (*update_wm)(struct drm_device *dev); 249 void (*update_wm)(struct drm_device *dev);
249 void (*update_sprite_wm)(struct drm_device *dev, int pipe, 250 void (*update_sprite_wm)(struct drm_device *dev, int pipe,
250 uint32_t sprite_width, int pixel_size); 251 uint32_t sprite_width, int pixel_size);
251 void (*sanitize_pm)(struct drm_device *dev);
252 void (*update_linetime_wm)(struct drm_device *dev, int pipe, 252 void (*update_linetime_wm)(struct drm_device *dev, int pipe,
253 struct drm_display_mode *mode); 253 struct drm_display_mode *mode);
254 int (*crtc_mode_set)(struct drm_crtc *crtc, 254 int (*crtc_mode_set)(struct drm_crtc *crtc,
@@ -256,6 +256,8 @@ struct drm_i915_display_funcs {
256 struct drm_display_mode *adjusted_mode, 256 struct drm_display_mode *adjusted_mode,
257 int x, int y, 257 int x, int y,
258 struct drm_framebuffer *old_fb); 258 struct drm_framebuffer *old_fb);
259 void (*crtc_enable)(struct drm_crtc *crtc);
260 void (*crtc_disable)(struct drm_crtc *crtc);
259 void (*off)(struct drm_crtc *crtc); 261 void (*off)(struct drm_crtc *crtc);
260 void (*write_eld)(struct drm_connector *connector, 262 void (*write_eld)(struct drm_connector *connector,
261 struct drm_crtc *crtc); 263 struct drm_crtc *crtc);
@@ -279,6 +281,32 @@ struct drm_i915_gt_funcs {
279 void (*force_wake_put)(struct drm_i915_private *dev_priv); 281 void (*force_wake_put)(struct drm_i915_private *dev_priv);
280}; 282};
281 283
284#define DEV_INFO_FLAGS \
285 DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
286 DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
287 DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
288 DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
289 DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
290 DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
291 DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
292 DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
293 DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
294 DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
295 DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
296 DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
297 DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
298 DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
299 DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
300 DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
301 DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
302 DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
303 DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
304 DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
305 DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
306 DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
307 DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
308 DEV_INFO_FLAG(has_llc)
309
282struct intel_device_info { 310struct intel_device_info {
283 u8 gen; 311 u8 gen;
284 u8 is_mobile:1; 312 u8 is_mobile:1;
@@ -402,12 +430,6 @@ typedef struct drm_i915_private {
402 430
403 struct resource mch_res; 431 struct resource mch_res;
404 432
405 unsigned int cpp;
406 int back_offset;
407 int front_offset;
408 int current_page;
409 int page_flipping;
410
411 atomic_t irq_received; 433 atomic_t irq_received;
412 434
413 /* protects the irq masks */ 435 /* protects the irq masks */
@@ -425,7 +447,6 @@ typedef struct drm_i915_private {
425 u32 hotplug_supported_mask; 447 u32 hotplug_supported_mask;
426 struct work_struct hotplug_work; 448 struct work_struct hotplug_work;
427 449
428 unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
429 int num_pipe; 450 int num_pipe;
430 int num_pch_pll; 451 int num_pch_pll;
431 452
@@ -434,8 +455,7 @@ typedef struct drm_i915_private {
434 struct timer_list hangcheck_timer; 455 struct timer_list hangcheck_timer;
435 int hangcheck_count; 456 int hangcheck_count;
436 uint32_t last_acthd[I915_NUM_RINGS]; 457 uint32_t last_acthd[I915_NUM_RINGS];
437 uint32_t last_instdone; 458 uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
438 uint32_t last_instdone1;
439 459
440 unsigned int stop_rings; 460 unsigned int stop_rings;
441 461
@@ -666,7 +686,13 @@ typedef struct drm_i915_private {
666 struct drm_mm gtt_space; 686 struct drm_mm gtt_space;
667 /** List of all objects in gtt_space. Used to restore gtt 687 /** List of all objects in gtt_space. Used to restore gtt
668 * mappings on resume */ 688 * mappings on resume */
669 struct list_head gtt_list; 689 struct list_head bound_list;
690 /**
691 * List of objects which are not bound to the GTT (thus
692 * are idle and not used by the GPU) but still have
693 * (presumably uncached) pages attached.
694 */
695 struct list_head unbound_list;
670 696
671 /** Usable portion of the GTT for GEM */ 697 /** Usable portion of the GTT for GEM */
672 unsigned long gtt_start; 698 unsigned long gtt_start;
@@ -696,17 +722,6 @@ typedef struct drm_i915_private {
696 struct list_head active_list; 722 struct list_head active_list;
697 723
698 /** 724 /**
699 * List of objects which are not in the ringbuffer but which
700 * still have a write_domain which needs to be flushed before
701 * unbinding.
702 *
703 * last_rendering_seqno is 0 while an object is in this list.
704 *
705 * A reference is held on the buffer while on this list.
706 */
707 struct list_head flushing_list;
708
709 /**
710 * LRU list of objects which are not in the ringbuffer and 725 * LRU list of objects which are not in the ringbuffer and
711 * are ready to unbind, but are still in the GTT. 726 * are ready to unbind, but are still in the GTT.
712 * 727 *
@@ -775,6 +790,12 @@ typedef struct drm_i915_private {
775 struct { 790 struct {
776 unsigned allow_batchbuffer : 1; 791 unsigned allow_batchbuffer : 1;
777 u32 __iomem *gfx_hws_cpu_addr; 792 u32 __iomem *gfx_hws_cpu_addr;
793
794 unsigned int cpp;
795 int back_offset;
796 int front_offset;
797 int current_page;
798 int page_flipping;
778 } dri1; 799 } dri1;
779 800
780 /* Kernel Modesetting */ 801 /* Kernel Modesetting */
@@ -796,9 +817,6 @@ typedef struct drm_i915_private {
796 bool lvds_downclock_avail; 817 bool lvds_downclock_avail;
797 /* indicates the reduced downclock for LVDS*/ 818 /* indicates the reduced downclock for LVDS*/
798 int lvds_downclock; 819 int lvds_downclock;
799 struct work_struct idle_work;
800 struct timer_list idle_timer;
801 bool busy;
802 u16 orig_clock; 820 u16 orig_clock;
803 int child_dev_num; 821 int child_dev_num;
804 struct child_device_config *child_dev; 822 struct child_device_config *child_dev;
@@ -807,26 +825,41 @@ typedef struct drm_i915_private {
807 825
808 bool mchbar_need_disable; 826 bool mchbar_need_disable;
809 827
810 struct work_struct rps_work; 828 /* gen6+ rps state */
811 spinlock_t rps_lock; 829 struct {
812 u32 pm_iir; 830 struct work_struct work;
813 831 u32 pm_iir;
814 u8 cur_delay; 832 /* lock - irqsave spinlock that protects the work_struct and
815 u8 min_delay; 833 * pm_iir. */
816 u8 max_delay; 834 spinlock_t lock;
817 u8 fmax; 835
818 u8 fstart; 836 /* The below variables and all the rps hw state are protected by
819 837 * dev->struct_mutex. */
820 u64 last_count1; 838 u8 cur_delay;
821 unsigned long last_time1; 839 u8 min_delay;
822 unsigned long chipset_power; 840 u8 max_delay;
823 u64 last_count2; 841 } rps;
824 struct timespec last_time2; 842
825 unsigned long gfx_power; 843 /* ilk-only ips/rps state. Everything in here is protected by the global
826 int c_m; 844 * mchdev_lock in intel_pm.c */
827 int r_t; 845 struct {
828 u8 corr; 846 u8 cur_delay;
829 spinlock_t *mchdev_lock; 847 u8 min_delay;
848 u8 max_delay;
849 u8 fmax;
850 u8 fstart;
851
852 u64 last_count1;
853 unsigned long last_time1;
854 unsigned long chipset_power;
855 u64 last_count2;
856 struct timespec last_time2;
857 unsigned long gfx_power;
858 u8 corr;
859
860 int c_m;
861 int r_t;
862 } ips;
830 863
831 enum no_fbc_reason no_fbc_reason; 864 enum no_fbc_reason no_fbc_reason;
832 865
@@ -861,30 +894,48 @@ enum hdmi_force_audio {
861}; 894};
862 895
863enum i915_cache_level { 896enum i915_cache_level {
864 I915_CACHE_NONE, 897 I915_CACHE_NONE = 0,
865 I915_CACHE_LLC, 898 I915_CACHE_LLC,
866 I915_CACHE_LLC_MLC, /* gen6+ */ 899 I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
900};
901
902struct drm_i915_gem_object_ops {
903 /* Interface between the GEM object and its backing storage.
904 * get_pages() is called once prior to the use of the associated set
905 * of pages before binding them into the GTT, and put_pages() is
906 * called after we no longer need them. As we expect there to be
907 * an associated cost with migrating pages between the backing storage
908 * and making them available for the GPU (e.g. clflush), we may hold
909 * onto the pages after they are no longer referenced by the GPU
910 * in case they may be used again shortly (for example migrating the
911 * pages to a different memory domain within the GTT). put_pages()
912 * will therefore most likely be called when the object itself is
913 * being released or under memory pressure (where we attempt to
914 * reap pages for the shrinker).
915 */
916 int (*get_pages)(struct drm_i915_gem_object *);
917 void (*put_pages)(struct drm_i915_gem_object *);
867}; 918};
868 919
869struct drm_i915_gem_object { 920struct drm_i915_gem_object {
870 struct drm_gem_object base; 921 struct drm_gem_object base;
871 922
923 const struct drm_i915_gem_object_ops *ops;
924
872 /** Current space allocated to this object in the GTT, if any. */ 925 /** Current space allocated to this object in the GTT, if any. */
873 struct drm_mm_node *gtt_space; 926 struct drm_mm_node *gtt_space;
874 struct list_head gtt_list; 927 struct list_head gtt_list;
875 928
876 /** This object's place on the active/flushing/inactive lists */ 929 /** This object's place on the active/inactive lists */
877 struct list_head ring_list; 930 struct list_head ring_list;
878 struct list_head mm_list; 931 struct list_head mm_list;
879 /** This object's place on GPU write list */
880 struct list_head gpu_write_list;
881 /** This object's place in the batchbuffer or on the eviction list */ 932 /** This object's place in the batchbuffer or on the eviction list */
882 struct list_head exec_list; 933 struct list_head exec_list;
883 934
884 /** 935 /**
885 * This is set if the object is on the active or flushing lists 936 * This is set if the object is on the active lists (has pending
886 * (has pending rendering), and is not set if it's on inactive (ready 937 * rendering and so a non-zero seqno), and is not set if it is on
887 * to be unbound). 938 * the inactive (ready to be unbound) list.
888 */ 939 */
889 unsigned int active:1; 940 unsigned int active:1;
890 941
@@ -895,12 +946,6 @@ struct drm_i915_gem_object {
895 unsigned int dirty:1; 946 unsigned int dirty:1;
896 947
897 /** 948 /**
898 * This is set if the object has been written to since the last
899 * GPU flush.
900 */
901 unsigned int pending_gpu_write:1;
902
903 /**
904 * Fence register bits (if any) for this object. Will be set 949 * Fence register bits (if any) for this object. Will be set
905 * as needed when mapped into the GTT. 950 * as needed when mapped into the GTT.
906 * Protected by dev->struct_mutex. 951 * Protected by dev->struct_mutex.
@@ -961,17 +1006,12 @@ struct drm_i915_gem_object {
961 1006
962 unsigned int has_aliasing_ppgtt_mapping:1; 1007 unsigned int has_aliasing_ppgtt_mapping:1;
963 unsigned int has_global_gtt_mapping:1; 1008 unsigned int has_global_gtt_mapping:1;
1009 unsigned int has_dma_mapping:1;
964 1010
965 struct page **pages; 1011 struct sg_table *pages;
966 1012 int pages_pin_count;
967 /**
968 * DMAR support
969 */
970 struct scatterlist *sg_list;
971 int num_sg;
972 1013
973 /* prime dma-buf support */ 1014 /* prime dma-buf support */
974 struct sg_table *sg_table;
975 void *dma_buf_vmapping; 1015 void *dma_buf_vmapping;
976 int vmapping_count; 1016 int vmapping_count;
977 1017
@@ -992,7 +1032,8 @@ struct drm_i915_gem_object {
992 struct intel_ring_buffer *ring; 1032 struct intel_ring_buffer *ring;
993 1033
994 /** Breadcrumb of last rendering to the buffer. */ 1034 /** Breadcrumb of last rendering to the buffer. */
995 uint32_t last_rendering_seqno; 1035 uint32_t last_read_seqno;
1036 uint32_t last_write_seqno;
996 /** Breadcrumb of last fenced GPU access to the buffer. */ 1037 /** Breadcrumb of last fenced GPU access to the buffer. */
997 uint32_t last_fenced_seqno; 1038 uint32_t last_fenced_seqno;
998 1039
@@ -1135,6 +1176,10 @@ struct drm_i915_file_private {
1135 1176
1136#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) 1177#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
1137 1178
1179#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1180
1181#define GT_FREQUENCY_MULTIPLIER 50
1182
1138#include "i915_trace.h" 1183#include "i915_trace.h"
1139 1184
1140/** 1185/**
@@ -1256,6 +1301,10 @@ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
1256 struct drm_file *file_priv); 1301 struct drm_file *file_priv);
1257int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 1302int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
1258 struct drm_file *file_priv); 1303 struct drm_file *file_priv);
1304int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
1305 struct drm_file *file);
1306int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
1307 struct drm_file *file);
1259int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 1308int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
1260 struct drm_file *file_priv); 1309 struct drm_file *file_priv);
1261int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 1310int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
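
The get/set caching ioctls declared here let userspace flip an object between the I915_CACHE_* levels above. A hedged userspace sketch; the struct layout, ioctl number, and caching values are assumptions based on the uapi header of this era:

/* Hypothetical use of the new SET_CACHING ioctl. */
#include <stdint.h>
#include <sys/ioctl.h>

struct drm_i915_gem_caching {
	uint32_t handle;	/* GEM handle */
	uint32_t caching;	/* assumed: 0 = uncached, 1 = LLC cached */
};

/* Assumed: DRM_COMMAND_BASE (0x40) + DRM_I915_GEM_SET_CACHING (0x2f). */
#define DRM_IOCTL_I915_GEM_SET_CACHING \
	_IOW('d', 0x40 + 0x2f, struct drm_i915_gem_caching)

static int set_cached(int fd, uint32_t handle)
{
	struct drm_i915_gem_caching arg = { .handle = handle, .caching = 1 };
	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}
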
@@ -1274,24 +1323,42 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
1274 struct drm_file *file_priv); 1323 struct drm_file *file_priv);
1275void i915_gem_load(struct drm_device *dev); 1324void i915_gem_load(struct drm_device *dev);
1276int i915_gem_init_object(struct drm_gem_object *obj); 1325int i915_gem_init_object(struct drm_gem_object *obj);
1277int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, 1326void i915_gem_object_init(struct drm_i915_gem_object *obj,
1278 uint32_t invalidate_domains, 1327 const struct drm_i915_gem_object_ops *ops);
1279 uint32_t flush_domains);
1280struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 1328struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1281 size_t size); 1329 size_t size);
1282void i915_gem_free_object(struct drm_gem_object *obj); 1330void i915_gem_free_object(struct drm_gem_object *obj);
1283int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 1331int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
1284 uint32_t alignment, 1332 uint32_t alignment,
1285 bool map_and_fenceable); 1333 bool map_and_fenceable,
1334 bool nonblocking);
1286void i915_gem_object_unpin(struct drm_i915_gem_object *obj); 1335void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
1287int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); 1336int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
1288void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 1337void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
1289void i915_gem_lastclose(struct drm_device *dev); 1338void i915_gem_lastclose(struct drm_device *dev);
1290 1339
1291int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, 1340int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
1292 gfp_t gfpmask); 1341static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
1342{
1343 struct scatterlist *sg = obj->pages->sgl;
1344 while (n >= SG_MAX_SINGLE_ALLOC) {
1345 sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
1346 n -= SG_MAX_SINGLE_ALLOC - 1;
1347 }
1348 return sg_page(sg+n);
1349}
1350static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
1351{
1352 BUG_ON(obj->pages == NULL);
1353 obj->pages_pin_count++;
1354}
1355static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
1356{
1357 BUG_ON(obj->pages_pin_count == 0);
1358 obj->pages_pin_count--;
1359}
1360
1293int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 1361int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
1294int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
1295int i915_gem_object_sync(struct drm_i915_gem_object *obj, 1362int i915_gem_object_sync(struct drm_i915_gem_object *obj,
1296 struct intel_ring_buffer *to); 1363 struct intel_ring_buffer *to);
1297void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1364void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
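
The pin_pages/unpin_pages pair above is the new rule for touching an object's backing store: put_pages() cannot reap pages while pages_pin_count is elevated. A sketch of the intended calling pattern, using the helpers added in this hunk (kernel context, struct_mutex held; function name hypothetical):

/* Sketch: read one byte out of backing page n without racing the
 * shrinker. Caller holds dev->struct_mutex. */
static u8 peek_byte(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;
	u8 *vaddr, val;

	if (i915_gem_object_get_pages(obj))	/* materialise obj->pages */
		return 0;
	i915_gem_object_pin_pages(obj);	/* block put_pages() while in use */

	page = i915_gem_object_get_page(obj, n);	/* walks the sg chain */
	vaddr = kmap_atomic(page);
	val = vaddr[0];
	kunmap_atomic(vaddr);

	i915_gem_object_unpin_pages(obj);
	return val;
}
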
@@ -1358,9 +1425,9 @@ void i915_gem_init_ppgtt(struct drm_device *dev);
1358void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 1425void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1359int __must_check i915_gpu_idle(struct drm_device *dev); 1426int __must_check i915_gpu_idle(struct drm_device *dev);
1360int __must_check i915_gem_idle(struct drm_device *dev); 1427int __must_check i915_gem_idle(struct drm_device *dev);
1361int __must_check i915_add_request(struct intel_ring_buffer *ring, 1428int i915_add_request(struct intel_ring_buffer *ring,
1362 struct drm_file *file, 1429 struct drm_file *file,
1363 struct drm_i915_gem_request *request); 1430 struct drm_i915_gem_request *request);
1364int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, 1431int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
1365 uint32_t seqno); 1432 uint32_t seqno);
1366int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 1433int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -1429,8 +1496,11 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
1429 1496
1430/* i915_gem_evict.c */ 1497/* i915_gem_evict.c */
1431int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, 1498int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
1432 unsigned alignment, bool mappable); 1499 unsigned alignment,
1433int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only); 1500 unsigned cache_level,
1501 bool mappable,
1502 bool nonblock);
1503int i915_gem_evict_everything(struct drm_device *dev);
1434 1504
1435/* i915_gem_stolen.c */ 1505/* i915_gem_stolen.c */
1436int i915_gem_init_stolen(struct drm_device *dev); 1506int i915_gem_init_stolen(struct drm_device *dev);
@@ -1519,6 +1589,7 @@ extern void intel_modeset_init(struct drm_device *dev);
1519extern void intel_modeset_gem_init(struct drm_device *dev); 1589extern void intel_modeset_gem_init(struct drm_device *dev);
1520extern void intel_modeset_cleanup(struct drm_device *dev); 1590extern void intel_modeset_cleanup(struct drm_device *dev);
1521extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 1591extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
1592extern void intel_modeset_setup_hw_state(struct drm_device *dev);
1522extern bool intel_fbc_enabled(struct drm_device *dev); 1593extern bool intel_fbc_enabled(struct drm_device *dev);
1523extern void intel_disable_fbc(struct drm_device *dev); 1594extern void intel_disable_fbc(struct drm_device *dev);
1524extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 1595extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -1529,6 +1600,8 @@ extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
1529extern int intel_enable_rc6(const struct drm_device *dev); 1600extern int intel_enable_rc6(const struct drm_device *dev);
1530 1601
1531extern bool i915_semaphore_is_enabled(struct drm_device *dev); 1602extern bool i915_semaphore_is_enabled(struct drm_device *dev);
1603int i915_reg_read_ioctl(struct drm_device *dev, void *data,
1604 struct drm_file *file);
1532 1605
1533/* overlay */ 1606/* overlay */
1534#ifdef CONFIG_DEBUG_FS 1607#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e2c93f7be8ed..e957f3740f68 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -36,12 +36,12 @@
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/dma-buf.h> 37#include <linux/dma-buf.h>
38 38
39static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
40static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); 39static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); 40static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
42static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 41static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
43 unsigned alignment, 42 unsigned alignment,
44 bool map_and_fenceable); 43 bool map_and_fenceable,
44 bool nonblocking);
45static int i915_gem_phys_pwrite(struct drm_device *dev, 45static int i915_gem_phys_pwrite(struct drm_device *dev,
46 struct drm_i915_gem_object *obj, 46 struct drm_i915_gem_object *obj,
47 struct drm_i915_gem_pwrite *args, 47 struct drm_i915_gem_pwrite *args,
@@ -55,6 +55,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
55 55
56static int i915_gem_inactive_shrink(struct shrinker *shrinker, 56static int i915_gem_inactive_shrink(struct shrinker *shrinker,
57 struct shrink_control *sc); 57 struct shrink_control *sc);
58static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
59static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
58static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); 60static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
59 61
60static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) 62static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
@@ -140,7 +142,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
140static inline bool 142static inline bool
141i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) 143i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
142{ 144{
143 return !obj->active; 145 return obj->gtt_space && !obj->active;
144} 146}
145 147
146int 148int
@@ -179,7 +181,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
179 181
180 pinned = 0; 182 pinned = 0;
181 mutex_lock(&dev->struct_mutex); 183 mutex_lock(&dev->struct_mutex);
182 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) 184 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
183 if (obj->pin_count) 185 if (obj->pin_count)
184 pinned += obj->gtt_space->size; 186 pinned += obj->gtt_space->size;
185 mutex_unlock(&dev->struct_mutex); 187 mutex_unlock(&dev->struct_mutex);
@@ -340,7 +342,7 @@ shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
340 page_length); 342 page_length);
341 kunmap_atomic(vaddr); 343 kunmap_atomic(vaddr);
342 344
343 return ret; 345 return ret ? -EFAULT : 0;
344} 346}
345 347
346static void 348static void
@@ -391,7 +393,7 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
391 page_length); 393 page_length);
392 kunmap(page); 394 kunmap(page);
393 395
394 return ret; 396 return ret ? -EFAULT : 0;
395} 397}
396 398
397static int 399static int
@@ -400,7 +402,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
400 struct drm_i915_gem_pread *args, 402 struct drm_i915_gem_pread *args,
401 struct drm_file *file) 403 struct drm_file *file)
402{ 404{
403 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
404 char __user *user_data; 405 char __user *user_data;
405 ssize_t remain; 406 ssize_t remain;
406 loff_t offset; 407 loff_t offset;
@@ -409,7 +410,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
409 int hit_slowpath = 0; 410 int hit_slowpath = 0;
410 int prefaulted = 0; 411 int prefaulted = 0;
411 int needs_clflush = 0; 412 int needs_clflush = 0;
412 int release_page; 413 struct scatterlist *sg;
414 int i;
413 415
414 user_data = (char __user *) (uintptr_t) args->data_ptr; 416 user_data = (char __user *) (uintptr_t) args->data_ptr;
415 remain = args->size; 417 remain = args->size;
@@ -423,16 +425,30 @@ i915_gem_shmem_pread(struct drm_device *dev,
423 * anyway again before the next pread happens. */ 425 * anyway again before the next pread happens. */
424 if (obj->cache_level == I915_CACHE_NONE) 426 if (obj->cache_level == I915_CACHE_NONE)
425 needs_clflush = 1; 427 needs_clflush = 1;
426 ret = i915_gem_object_set_to_gtt_domain(obj, false); 428 if (obj->gtt_space) {
427 if (ret) 429 ret = i915_gem_object_set_to_gtt_domain(obj, false);
428 return ret; 430 if (ret)
431 return ret;
432 }
429 } 433 }
430 434
435 ret = i915_gem_object_get_pages(obj);
436 if (ret)
437 return ret;
438
439 i915_gem_object_pin_pages(obj);
440
431 offset = args->offset; 441 offset = args->offset;
432 442
433 while (remain > 0) { 443 for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
434 struct page *page; 444 struct page *page;
435 445
446 if (i < offset >> PAGE_SHIFT)
447 continue;
448
449 if (remain <= 0)
450 break;
451
436 /* Operation in this page 452 /* Operation in this page
437 * 453 *
438 * shmem_page_offset = offset within page in shmem file 454 * shmem_page_offset = offset within page in shmem file
@@ -443,18 +459,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
443 if ((shmem_page_offset + page_length) > PAGE_SIZE) 459 if ((shmem_page_offset + page_length) > PAGE_SIZE)
444 page_length = PAGE_SIZE - shmem_page_offset; 460 page_length = PAGE_SIZE - shmem_page_offset;
445 461
446 if (obj->pages) { 462 page = sg_page(sg);
447 page = obj->pages[offset >> PAGE_SHIFT];
448 release_page = 0;
449 } else {
450 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
451 if (IS_ERR(page)) {
452 ret = PTR_ERR(page);
453 goto out;
454 }
455 release_page = 1;
456 }
457
458 page_do_bit17_swizzling = obj_do_bit17_swizzling && 463 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
459 (page_to_phys(page) & (1 << 17)) != 0; 464 (page_to_phys(page) & (1 << 17)) != 0;
460 465
@@ -465,7 +470,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
465 goto next_page; 470 goto next_page;
466 471
467 hit_slowpath = 1; 472 hit_slowpath = 1;
468 page_cache_get(page);
469 mutex_unlock(&dev->struct_mutex); 473 mutex_unlock(&dev->struct_mutex);
470 474
471 if (!prefaulted) { 475 if (!prefaulted) {
@@ -483,16 +487,12 @@ i915_gem_shmem_pread(struct drm_device *dev,
483 needs_clflush); 487 needs_clflush);
484 488
485 mutex_lock(&dev->struct_mutex); 489 mutex_lock(&dev->struct_mutex);
486 page_cache_release(page); 490
487next_page: 491next_page:
488 mark_page_accessed(page); 492 mark_page_accessed(page);
489 if (release_page)
490 page_cache_release(page);
491 493
492 if (ret) { 494 if (ret)
493 ret = -EFAULT;
494 goto out; 495 goto out;
495 }
496 496
497 remain -= page_length; 497 remain -= page_length;
498 user_data += page_length; 498 user_data += page_length;
@@ -500,6 +500,8 @@ next_page:
500 } 500 }
501 501
502out: 502out:
503 i915_gem_object_unpin_pages(obj);
504
503 if (hit_slowpath) { 505 if (hit_slowpath) {
504 /* Fixup: Kill any reinstated backing storage pages */ 506 /* Fixup: Kill any reinstated backing storage pages */
505 if (obj->madv == __I915_MADV_PURGED) 507 if (obj->madv == __I915_MADV_PURGED)
@@ -605,7 +607,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
605 char __user *user_data; 607 char __user *user_data;
606 int page_offset, page_length, ret; 608 int page_offset, page_length, ret;
607 609
608 ret = i915_gem_object_pin(obj, 0, true); 610 ret = i915_gem_object_pin(obj, 0, true, true);
609 if (ret) 611 if (ret)
610 goto out; 612 goto out;
611 613
@@ -685,7 +687,7 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
685 page_length); 687 page_length);
686 kunmap_atomic(vaddr); 688 kunmap_atomic(vaddr);
687 689
688 return ret; 690 return ret ? -EFAULT : 0;
689} 691}
690 692
691/* Only difference to the fast-path function is that this can handle bit17 693/* Only difference to the fast-path function is that this can handle bit17
@@ -719,7 +721,7 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
719 page_do_bit17_swizzling); 721 page_do_bit17_swizzling);
720 kunmap(page); 722 kunmap(page);
721 723
722 return ret; 724 return ret ? -EFAULT : 0;
723} 725}
724 726
725static int 727static int
@@ -728,7 +730,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
728 struct drm_i915_gem_pwrite *args, 730 struct drm_i915_gem_pwrite *args,
729 struct drm_file *file) 731 struct drm_file *file)
730{ 732{
731 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
732 ssize_t remain; 733 ssize_t remain;
733 loff_t offset; 734 loff_t offset;
734 char __user *user_data; 735 char __user *user_data;
@@ -737,7 +738,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
737 int hit_slowpath = 0; 738 int hit_slowpath = 0;
738 int needs_clflush_after = 0; 739 int needs_clflush_after = 0;
739 int needs_clflush_before = 0; 740 int needs_clflush_before = 0;
740 int release_page; 741 int i;
742 struct scatterlist *sg;
741 743
742 user_data = (char __user *) (uintptr_t) args->data_ptr; 744 user_data = (char __user *) (uintptr_t) args->data_ptr;
743 remain = args->size; 745 remain = args->size;
@@ -751,9 +753,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
751 * right away and we therefore have to clflush anyway. */ 753 * right away and we therefore have to clflush anyway. */
752 if (obj->cache_level == I915_CACHE_NONE) 754 if (obj->cache_level == I915_CACHE_NONE)
753 needs_clflush_after = 1; 755 needs_clflush_after = 1;
754 ret = i915_gem_object_set_to_gtt_domain(obj, true); 756 if (obj->gtt_space) {
755 if (ret) 757 ret = i915_gem_object_set_to_gtt_domain(obj, true);
756 return ret; 758 if (ret)
759 return ret;
760 }
757 } 761 }
758 /* Same trick applies for invalidate partially written cachelines before 762 /* Same trick applies for invalidate partially written cachelines before
759 * writing. */ 763 * writing. */
@@ -761,13 +765,25 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
761 && obj->cache_level == I915_CACHE_NONE) 765 && obj->cache_level == I915_CACHE_NONE)
762 needs_clflush_before = 1; 766 needs_clflush_before = 1;
763 767
768 ret = i915_gem_object_get_pages(obj);
769 if (ret)
770 return ret;
771
772 i915_gem_object_pin_pages(obj);
773
764 offset = args->offset; 774 offset = args->offset;
765 obj->dirty = 1; 775 obj->dirty = 1;
766 776
767 while (remain > 0) { 777 for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
768 struct page *page; 778 struct page *page;
769 int partial_cacheline_write; 779 int partial_cacheline_write;
770 780
781 if (i < offset >> PAGE_SHIFT)
782 continue;
783
784 if (remain <= 0)
785 break;
786
771 /* Operation in this page 787 /* Operation in this page
772 * 788 *
773 * shmem_page_offset = offset within page in shmem file 789 * shmem_page_offset = offset within page in shmem file
@@ -786,18 +802,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
786 ((shmem_page_offset | page_length) 802 ((shmem_page_offset | page_length)
787 & (boot_cpu_data.x86_clflush_size - 1)); 803 & (boot_cpu_data.x86_clflush_size - 1));
788 804
789 if (obj->pages) { 805 page = sg_page(sg);
790 page = obj->pages[offset >> PAGE_SHIFT];
791 release_page = 0;
792 } else {
793 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
794 if (IS_ERR(page)) {
795 ret = PTR_ERR(page);
796 goto out;
797 }
798 release_page = 1;
799 }
800
801 page_do_bit17_swizzling = obj_do_bit17_swizzling && 806 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
802 (page_to_phys(page) & (1 << 17)) != 0; 807 (page_to_phys(page) & (1 << 17)) != 0;
803 808
@@ -809,26 +814,20 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
809 goto next_page; 814 goto next_page;
810 815
811 hit_slowpath = 1; 816 hit_slowpath = 1;
812 page_cache_get(page);
813 mutex_unlock(&dev->struct_mutex); 817 mutex_unlock(&dev->struct_mutex);
814
815 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, 818 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
816 user_data, page_do_bit17_swizzling, 819 user_data, page_do_bit17_swizzling,
817 partial_cacheline_write, 820 partial_cacheline_write,
818 needs_clflush_after); 821 needs_clflush_after);
819 822
820 mutex_lock(&dev->struct_mutex); 823 mutex_lock(&dev->struct_mutex);
821 page_cache_release(page); 824
822next_page: 825next_page:
823 set_page_dirty(page); 826 set_page_dirty(page);
824 mark_page_accessed(page); 827 mark_page_accessed(page);
825 if (release_page)
826 page_cache_release(page);
827 828
828 if (ret) { 829 if (ret)
829 ret = -EFAULT;
830 goto out; 830 goto out;
831 }
832 831
833 remain -= page_length; 832 remain -= page_length;
834 user_data += page_length; 833 user_data += page_length;
@@ -836,6 +835,8 @@ next_page:
836 } 835 }
837 836
838out: 837out:
838 i915_gem_object_unpin_pages(obj);
839
839 if (hit_slowpath) { 840 if (hit_slowpath) {
840 /* Fixup: Kill any reinstated backing storage pages */ 841 /* Fixup: Kill any reinstated backing storage pages */
841 if (obj->madv == __I915_MADV_PURGED) 842 if (obj->madv == __I915_MADV_PURGED)
@@ -919,10 +920,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
919 goto out; 920 goto out;
920 } 921 }
921 922
922 if (obj->gtt_space && 923 if (obj->cache_level == I915_CACHE_NONE &&
923 obj->cache_level == I915_CACHE_NONE &&
924 obj->tiling_mode == I915_TILING_NONE && 924 obj->tiling_mode == I915_TILING_NONE &&
925 obj->map_and_fenceable &&
926 obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 925 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
927 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); 926 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
928 /* Note that the gtt paths might fail with non-page-backed user 927 /* Note that the gtt paths might fail with non-page-backed user
@@ -930,7 +929,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
930 * textures). Fallback to the shmem path in that case. */ 929 * textures). Fallback to the shmem path in that case. */
931 } 930 }
932 931
933 if (ret == -EFAULT) 932 if (ret == -EFAULT || ret == -ENOSPC)
934 ret = i915_gem_shmem_pwrite(dev, obj, args, file); 933 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
935 934
936out: 935out:
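With pinning now allowed to fail for lack of mappable GTT space, the ioctl treats -ENOSPC like -EFAULT and falls back to the shmem path. Reduced to its control flow (sketch; dev, obj, args and file come from the ioctl context):

	ret = -EFAULT;				/* assume no fast path */
	if (obj->cache_level == I915_CACHE_NONE &&
	    obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);

	if (ret == -EFAULT || ret == -ENOSPC)	/* unmappable or unpinnable */
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);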
@@ -940,6 +939,240 @@ unlock:
940 return ret; 939 return ret;
941} 940}
942 941
942int
943i915_gem_check_wedge(struct drm_i915_private *dev_priv,
944 bool interruptible)
945{
946 if (atomic_read(&dev_priv->mm.wedged)) {
947 struct completion *x = &dev_priv->error_completion;
948 bool recovery_complete;
949 unsigned long flags;
950
951 /* Give the error handler a chance to run. */
952 spin_lock_irqsave(&x->wait.lock, flags);
953 recovery_complete = x->done > 0;
954 spin_unlock_irqrestore(&x->wait.lock, flags);
955
956 /* Non-interruptible callers can't handle -EAGAIN, hence return
957 * -EIO unconditionally for these. */
958 if (!interruptible)
959 return -EIO;
960
961 /* Recovery complete, but still wedged means reset failure. */
962 if (recovery_complete)
963 return -EIO;
964
965 return -EAGAIN;
966 }
967
968 return 0;
969}
970
971/*
972 * Compare seqno against outstanding lazy request. Emit a request if they are
973 * equal.
974 */
975static int
976i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
977{
978 int ret;
979
980 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
981
982 ret = 0;
983 if (seqno == ring->outstanding_lazy_request)
984 ret = i915_add_request(ring, NULL, NULL);
985
986 return ret;
987}
988
989/**
990 * __wait_seqno - wait until execution of seqno has finished
991 * @ring: the ring expected to report seqno
992 * @seqno: the sequence number to wait for
993 * @interruptible: do an interruptible wait (normally yes)
994 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
995 *
996 * Returns 0 if the seqno was found within the allotted time. Else returns the
997 * errno, with the remaining time filled in the timeout argument.
998 */
999static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1000 bool interruptible, struct timespec *timeout)
1001{
1002 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1003 struct timespec before, now, wait_time={1,0};
1004 unsigned long timeout_jiffies;
1005 long end;
1006 bool wait_forever = true;
1007 int ret;
1008
1009 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1010 return 0;
1011
1012 trace_i915_gem_request_wait_begin(ring, seqno);
1013
1014 if (timeout != NULL) {
1015 wait_time = *timeout;
1016 wait_forever = false;
1017 }
1018
1019 timeout_jiffies = timespec_to_jiffies(&wait_time);
1020
1021 if (WARN_ON(!ring->irq_get(ring)))
1022 return -ENODEV;
1023
1024 /* Record current time in case interrupted by signal, or wedged */
1025 getrawmonotonic(&before);
1026
1027#define EXIT_COND \
1028 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1029 atomic_read(&dev_priv->mm.wedged))
1030 do {
1031 if (interruptible)
1032 end = wait_event_interruptible_timeout(ring->irq_queue,
1033 EXIT_COND,
1034 timeout_jiffies);
1035 else
1036 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1037 timeout_jiffies);
1038
1039 ret = i915_gem_check_wedge(dev_priv, interruptible);
1040 if (ret)
1041 end = ret;
1042 } while (end == 0 && wait_forever);
1043
1044 getrawmonotonic(&now);
1045
1046 ring->irq_put(ring);
1047 trace_i915_gem_request_wait_end(ring, seqno);
1048#undef EXIT_COND
1049
1050 if (timeout) {
1051 struct timespec sleep_time = timespec_sub(now, before);
1052 *timeout = timespec_sub(*timeout, sleep_time);
1053 }
1054
1055 switch (end) {
1056 case -EIO:
1057 case -EAGAIN: /* Wedged */
1058 case -ERESTARTSYS: /* Signal */
1059 return (int)end;
1060 case 0: /* Timeout */
1061 if (timeout)
1062 set_normalized_timespec(timeout, 0, 0);
1063 return -ETIME;
1064 default: /* Completed */
1065 WARN_ON(end < 0); /* We're not aware of other errors */
1066 return 0;
1067 }
1068}
1069
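Note the in/out contract of the timeout argument: on completion the unconsumed time is written back, and on timeout it is normalized to zero. A conceptual caller therefore looks like this (sketch; __wait_seqno is static, so this only models the semantics):

	struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };
	int ret;

	ret = __wait_seqno(ring, seqno, true, &timeout);
	if (ret == 0) {
		/* completed: 'timeout' holds the unused remainder */
	} else if (ret == -ETIME) {
		/* timed out: 'timeout' was normalized to {0, 0} */
	}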
1070/**
1071 * Waits for a sequence number to be signaled, and cleans up the
1072 * request and object lists appropriately for that event.
1073 */
1074int
1075i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1076{
1077 struct drm_device *dev = ring->dev;
1078 struct drm_i915_private *dev_priv = dev->dev_private;
1079 bool interruptible = dev_priv->mm.interruptible;
1080 int ret;
1081
1082 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1083 BUG_ON(seqno == 0);
1084
1085 ret = i915_gem_check_wedge(dev_priv, interruptible);
1086 if (ret)
1087 return ret;
1088
1089 ret = i915_gem_check_olr(ring, seqno);
1090 if (ret)
1091 return ret;
1092
1093 return __wait_seqno(ring, seqno, interruptible, NULL);
1094}
1095
1096/**
1097 * Ensures that all rendering to the object has completed and the object is
1098 * safe to unbind from the GTT or access from the CPU.
1099 */
1100static __must_check int
1101i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1102 bool readonly)
1103{
1104 struct intel_ring_buffer *ring = obj->ring;
1105 u32 seqno;
1106 int ret;
1107
1108 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1109 if (seqno == 0)
1110 return 0;
1111
1112 ret = i915_wait_seqno(ring, seqno);
1113 if (ret)
1114 return ret;
1115
1116 i915_gem_retire_requests_ring(ring);
1117
1118 /* Manually manage the write flush as we may not yet have
1119 * retired the buffer.
1120 */
1121 if (obj->last_write_seqno &&
1122 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1123 obj->last_write_seqno = 0;
1124 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1125 }
1126
1127 return 0;
1128}
1129
1130/* A nonblocking variant of the above wait. This is a highly dangerous routine
1131 * as the object state may change during this call.
1132 */
1133static __must_check int
1134i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1135 bool readonly)
1136{
1137 struct drm_device *dev = obj->base.dev;
1138 struct drm_i915_private *dev_priv = dev->dev_private;
1139 struct intel_ring_buffer *ring = obj->ring;
1140 u32 seqno;
1141 int ret;
1142
1143 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1144 BUG_ON(!dev_priv->mm.interruptible);
1145
1146 seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1147 if (seqno == 0)
1148 return 0;
1149
1150 ret = i915_gem_check_wedge(dev_priv, true);
1151 if (ret)
1152 return ret;
1153
1154 ret = i915_gem_check_olr(ring, seqno);
1155 if (ret)
1156 return ret;
1157
1158 mutex_unlock(&dev->struct_mutex);
1159 ret = __wait_seqno(ring, seqno, true, NULL);
1160 mutex_lock(&dev->struct_mutex);
1161
1162 i915_gem_retire_requests_ring(ring);
1163
1164 /* Manually manage the write flush as we may not yet have
1165 * retired the buffer.
1166 */
1167 if (obj->last_write_seqno &&
1168 i915_seqno_passed(seqno, obj->last_write_seqno)) {
1169 obj->last_write_seqno = 0;
1170 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1171 }
1172
1173 return ret;
1174}
1175
943/** 1176/**
944 * Called when user space prepares to use an object with the CPU, either 1177 * Called when user space prepares to use an object with the CPU, either
945 * through the mmap ioctl's mapping or a GTT mapping. 1178 * through the mmap ioctl's mapping or a GTT mapping.
@@ -977,6 +1210,14 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
977 goto unlock; 1210 goto unlock;
978 } 1211 }
979 1212
1213 /* Try to flush the object off the GPU without holding the lock.
1214 * We will repeat the flush holding the lock in the normal manner
1215 * to catch cases where we are gazumped.
1216 */
1217 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1218 if (ret)
1219 goto unref;
1220
980 if (read_domains & I915_GEM_DOMAIN_GTT) { 1221 if (read_domains & I915_GEM_DOMAIN_GTT) {
981 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); 1222 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
982 1223
@@ -990,6 +1231,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
990 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); 1231 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
991 } 1232 }
992 1233
1234unref:
993 drm_gem_object_unreference(&obj->base); 1235 drm_gem_object_unreference(&obj->base);
994unlock: 1236unlock:
995 mutex_unlock(&dev->struct_mutex); 1237 mutex_unlock(&dev->struct_mutex);
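The new nonblocking wait lets set-domain perform the expensive stall without holding struct_mutex; the locked domain change below then repeats the check, so a racing submission ("gazumped") is still caught. In outline (sketch of the two stages above):

	/* 1: flush the object off the GPU without the lock (may race) */
	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
	if (ret)
		goto unref;

	/* 2: the locked domain change re-validates and waits again if new
	 * rendering was queued in the meantime */
	if (read_domains & I915_GEM_DOMAIN_GTT)
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);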
@@ -1109,7 +1351,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1109 goto unlock; 1351 goto unlock;
1110 } 1352 }
1111 if (!obj->gtt_space) { 1353 if (!obj->gtt_space) {
1112 ret = i915_gem_object_bind_to_gtt(obj, 0, true); 1354 ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
1113 if (ret) 1355 if (ret)
1114 goto unlock; 1356 goto unlock;
1115 1357
@@ -1270,6 +1512,42 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1270 return i915_gem_get_gtt_size(dev, size, tiling_mode); 1512 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1271} 1513}
1272 1514
1515static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1516{
1517 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1518 int ret;
1519
1520 if (obj->base.map_list.map)
1521 return 0;
1522
1523 ret = drm_gem_create_mmap_offset(&obj->base);
1524 if (ret != -ENOSPC)
1525 return ret;
1526
1527 /* Badly fragmented mmap space? The only way we can recover
1528 * space is by destroying unwanted objects. We can't randomly release
1529 * mmap_offsets as userspace expects them to be persistent for the
1530 * lifetime of the objects. The closest we can do is to release the
1531 * offsets on purgeable objects by truncating them and marking them
1532 * purged, which prevents userspace from ever using those objects again.
1533 */
1534 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1535 ret = drm_gem_create_mmap_offset(&obj->base);
1536 if (ret != -ENOSPC)
1537 return ret;
1538
1539 i915_gem_shrink_all(dev_priv);
1540 return drm_gem_create_mmap_offset(&obj->base);
1541}
1542
1543static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1544{
1545 if (!obj->base.map_list.map)
1546 return;
1547
1548 drm_gem_free_mmap_offset(&obj->base);
1549}
1550
1273int 1551int
1274i915_gem_mmap_gtt(struct drm_file *file, 1552i915_gem_mmap_gtt(struct drm_file *file,
1275 struct drm_device *dev, 1553 struct drm_device *dev,
@@ -1301,11 +1579,9 @@ i915_gem_mmap_gtt(struct drm_file *file,
1301 goto out; 1579 goto out;
1302 } 1580 }
1303 1581
1304 if (!obj->base.map_list.map) { 1582 ret = i915_gem_object_create_mmap_offset(obj);
1305 ret = drm_gem_create_mmap_offset(&obj->base); 1583 if (ret)
1306 if (ret) 1584 goto out;
1307 goto out;
1308 }
1309 1585
1310 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; 1586 *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1311 1587
@@ -1340,83 +1616,245 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1340 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); 1616 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1341} 1617}
1342 1618
1343int 1619/* Immediately discard the backing storage */
1344i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, 1620static void
1345 gfp_t gfpmask) 1621i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1346{ 1622{
1347 int page_count, i;
1348 struct address_space *mapping;
1349 struct inode *inode; 1623 struct inode *inode;
1350 struct page *page;
1351 1624
1352 if (obj->pages || obj->sg_table) 1625 i915_gem_object_free_mmap_offset(obj);
1353 return 0;
1354 1626
1355 /* Get the list of pages out of our struct file. They'll be pinned 1627 if (obj->base.filp == NULL)
1356 * at this point until we release them. 1628 return;
1357 */
1358 page_count = obj->base.size / PAGE_SIZE;
1359 BUG_ON(obj->pages != NULL);
1360 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1361 if (obj->pages == NULL)
1362 return -ENOMEM;
1363 1629
1630 /* Our goal here is to return as much of the memory as
1631 * possible back to the system, as we are called from OOM.
1632 * To do this we must instruct the shmfs to drop all of its
1633 * backing pages, *now*.
1634 */
1364 inode = obj->base.filp->f_path.dentry->d_inode; 1635 inode = obj->base.filp->f_path.dentry->d_inode;
1365 mapping = inode->i_mapping; 1636 shmem_truncate_range(inode, 0, (loff_t)-1);
1366 gfpmask |= mapping_gfp_mask(mapping);
1367
1368 for (i = 0; i < page_count; i++) {
1369 page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
1370 if (IS_ERR(page))
1371 goto err_pages;
1372
1373 obj->pages[i] = page;
1374 }
1375
1376 if (i915_gem_object_needs_bit17_swizzle(obj))
1377 i915_gem_object_do_bit_17_swizzle(obj);
1378
1379 return 0;
1380 1637
1381err_pages: 1638 obj->madv = __I915_MADV_PURGED;
1382 while (i--) 1639}
1383 page_cache_release(obj->pages[i]);
1384 1640
1385 drm_free_large(obj->pages); 1641static inline int
1386 obj->pages = NULL; 1642i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1387 return PTR_ERR(page); 1643{
1644 return obj->madv == I915_MADV_DONTNEED;
1388} 1645}
1389 1646
1390static void 1647static void
1391i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) 1648i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1392{ 1649{
1393 int page_count = obj->base.size / PAGE_SIZE; 1650 int page_count = obj->base.size / PAGE_SIZE;
1394 int i; 1651 struct scatterlist *sg;
1395 1652 int ret, i;
1396 if (!obj->pages)
1397 return;
1398 1653
1399 BUG_ON(obj->madv == __I915_MADV_PURGED); 1654 BUG_ON(obj->madv == __I915_MADV_PURGED);
1400 1655
1656 ret = i915_gem_object_set_to_cpu_domain(obj, true);
1657 if (ret) {
1658 /* In the event of a disaster, abandon all caches and
1659 * hope for the best.
1660 */
1661 WARN_ON(ret != -EIO);
1662 i915_gem_clflush_object(obj);
1663 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1664 }
1665
1401 if (i915_gem_object_needs_bit17_swizzle(obj)) 1666 if (i915_gem_object_needs_bit17_swizzle(obj))
1402 i915_gem_object_save_bit_17_swizzle(obj); 1667 i915_gem_object_save_bit_17_swizzle(obj);
1403 1668
1404 if (obj->madv == I915_MADV_DONTNEED) 1669 if (obj->madv == I915_MADV_DONTNEED)
1405 obj->dirty = 0; 1670 obj->dirty = 0;
1406 1671
1407 for (i = 0; i < page_count; i++) { 1672 for_each_sg(obj->pages->sgl, sg, page_count, i) {
1673 struct page *page = sg_page(sg);
1674
1408 if (obj->dirty) 1675 if (obj->dirty)
1409 set_page_dirty(obj->pages[i]); 1676 set_page_dirty(page);
1410 1677
1411 if (obj->madv == I915_MADV_WILLNEED) 1678 if (obj->madv == I915_MADV_WILLNEED)
1412 mark_page_accessed(obj->pages[i]); 1679 mark_page_accessed(page);
1413 1680
1414 page_cache_release(obj->pages[i]); 1681 page_cache_release(page);
1415 } 1682 }
1416 obj->dirty = 0; 1683 obj->dirty = 0;
1417 1684
1418 drm_free_large(obj->pages); 1685 sg_free_table(obj->pages);
1686 kfree(obj->pages);
1687}
1688
1689static int
1690i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1691{
1692 const struct drm_i915_gem_object_ops *ops = obj->ops;
1693
1694 if (obj->pages == NULL)
1695 return 0;
1696
1697 BUG_ON(obj->gtt_space);
1698
1699 if (obj->pages_pin_count)
1700 return -EBUSY;
1701
1702 ops->put_pages(obj);
1419 obj->pages = NULL; 1703 obj->pages = NULL;
1704
1705 list_del(&obj->gtt_list);
1706 if (i915_gem_object_is_purgeable(obj))
1707 i915_gem_object_truncate(obj);
1708
1709 return 0;
1710}
1711
1712static long
1713i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1714{
1715 struct drm_i915_gem_object *obj, *next;
1716 long count = 0;
1717
1718 list_for_each_entry_safe(obj, next,
1719 &dev_priv->mm.unbound_list,
1720 gtt_list) {
1721 if (i915_gem_object_is_purgeable(obj) &&
1722 i915_gem_object_put_pages(obj) == 0) {
1723 count += obj->base.size >> PAGE_SHIFT;
1724 if (count >= target)
1725 return count;
1726 }
1727 }
1728
1729 list_for_each_entry_safe(obj, next,
1730 &dev_priv->mm.inactive_list,
1731 mm_list) {
1732 if (i915_gem_object_is_purgeable(obj) &&
1733 i915_gem_object_unbind(obj) == 0 &&
1734 i915_gem_object_put_pages(obj) == 0) {
1735 count += obj->base.size >> PAGE_SHIFT;
1736 if (count >= target)
1737 return count;
1738 }
1739 }
1740
1741 return count;
1742}
1743
1744static void
1745i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1746{
1747 struct drm_i915_gem_object *obj, *next;
1748
1749 i915_gem_evict_everything(dev_priv->dev);
1750
1751 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
1752 i915_gem_object_put_pages(obj);
1753}
1754
1755static int
1756i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1757{
1758 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1759 int page_count, i;
1760 struct address_space *mapping;
1761 struct sg_table *st;
1762 struct scatterlist *sg;
1763 struct page *page;
1764 gfp_t gfp;
1765
1766 /* Assert that the object is not currently in any GPU domain. As it
1767 * wasn't in the GTT, there shouldn't be any way it could have been in
1768 * a GPU cache
1769 */
1770 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1771 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1772
1773 st = kmalloc(sizeof(*st), GFP_KERNEL);
1774 if (st == NULL)
1775 return -ENOMEM;
1776
1777 page_count = obj->base.size / PAGE_SIZE;
1778 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1779 sg_free_table(st);
1780 kfree(st);
1781 return -ENOMEM;
1782 }
1783
1784 /* Get the list of pages out of our struct file. They'll be pinned
1785 * at this point until we release them.
1786 *
1787 * Fail silently without starting the shrinker
1788 */
1789 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
1790 gfp = mapping_gfp_mask(mapping);
1791 gfp |= __GFP_NORETRY | __GFP_NOWARN;
1792 gfp &= ~(__GFP_IO | __GFP_WAIT);
1793 for_each_sg(st->sgl, sg, page_count, i) {
1794 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1795 if (IS_ERR(page)) {
1796 i915_gem_purge(dev_priv, page_count);
1797 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1798 }
1799 if (IS_ERR(page)) {
1800 /* We've tried hard to allocate the memory by reaping
1801 * our own buffer, now let the real VM do its job and
1802 * go down in flames if truly OOM.
1803 */
1804 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);
1805 gfp |= __GFP_IO | __GFP_WAIT;
1806
1807 i915_gem_shrink_all(dev_priv);
1808 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1809 if (IS_ERR(page))
1810 goto err_pages;
1811
1812 gfp |= __GFP_NORETRY | __GFP_NOWARN;
1813 gfp &= ~(__GFP_IO | __GFP_WAIT);
1814 }
1815
1816 sg_set_page(sg, page, PAGE_SIZE, 0);
1817 }
1818
1819 if (i915_gem_object_needs_bit17_swizzle(obj))
1820 i915_gem_object_do_bit_17_swizzle(obj);
1821
1822 obj->pages = st;
1823 return 0;
1824
1825err_pages:
1826 for_each_sg(st->sgl, sg, i, page_count)
1827 page_cache_release(sg_page(sg));
1828 sg_free_table(st);
1829 kfree(st);
1830 return PTR_ERR(page);
1831}
1832
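The allocation strategy above is worth restating: pages are first requested with __GFP_NORETRY | __GFP_NOWARN and without __GFP_IO/__GFP_WAIT, so a failure returns quickly instead of invoking the OOM killer; only after reaping the driver's own buffers does it retry with the full gfp mask. Distilled (sketch, error handling trimmed):

	gfp = mapping_gfp_mask(mapping);
	gfp |= __GFP_NORETRY | __GFP_NOWARN;	/* fail fast, silently */
	gfp &= ~(__GFP_IO | __GFP_WAIT);

	page = shmem_read_mapping_page_gfp(mapping, i, gfp);
	if (IS_ERR(page)) {
		i915_gem_purge(dev_priv, page_count);	/* reap our own */
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
	}
	if (IS_ERR(page)) {
		gfp &= ~(__GFP_NORETRY | __GFP_NOWARN);	/* let the VM try hard */
		gfp |= __GFP_IO | __GFP_WAIT;
		i915_gem_shrink_all(dev_priv);
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
	}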
1833/* Ensure that the associated pages are gathered from the backing storage
1834 * and pinned into our object. i915_gem_object_get_pages() may be called
1835 * multiple times before they are released by a single call to
1836 * i915_gem_object_put_pages() - once the pages are no longer referenced
1837 * either as a result of memory pressure (reaping pages under the shrinker)
1838 * or as the object is itself released.
1839 */
1840int
1841i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1842{
1843 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1844 const struct drm_i915_gem_object_ops *ops = obj->ops;
1845 int ret;
1846
1847 if (obj->pages)
1848 return 0;
1849
1850 BUG_ON(obj->pages_pin_count);
1851
1852 ret = ops->get_pages(obj);
1853 if (ret)
1854 return ret;
1855
1856 list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
1857 return 0;
1420} 1858}
1421 1859
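get_pages()/put_pages() are now reference-managed: the backing store stays resident while pages_pin_count is non-zero, and the shrinker only reaps unpinned objects. The I/O paths above follow this shape (sketch):

	ret = i915_gem_object_get_pages(obj);	/* populate obj->pages */
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);		/* keep the shrinker away */
	/* ... touch the backing pages ... */
	i915_gem_object_unpin_pages(obj);	/* reap-able again */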
1422void 1860void
@@ -1440,7 +1878,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1440 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); 1878 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1441 list_move_tail(&obj->ring_list, &ring->active_list); 1879 list_move_tail(&obj->ring_list, &ring->active_list);
1442 1880
1443 obj->last_rendering_seqno = seqno; 1881 obj->last_read_seqno = seqno;
1444 1882
1445 if (obj->fenced_gpu_access) { 1883 if (obj->fenced_gpu_access) {
1446 obj->last_fenced_seqno = seqno; 1884 obj->last_fenced_seqno = seqno;
@@ -1457,97 +1895,35 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1457} 1895}
1458 1896
1459static void 1897static void
1460i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) 1898i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1461{
1462 list_del_init(&obj->ring_list);
1463 obj->last_rendering_seqno = 0;
1464 obj->last_fenced_seqno = 0;
1465}
1466
1467static void
1468i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
1469{ 1899{
1470 struct drm_device *dev = obj->base.dev; 1900 struct drm_device *dev = obj->base.dev;
1471 drm_i915_private_t *dev_priv = dev->dev_private; 1901 struct drm_i915_private *dev_priv = dev->dev_private;
1472 1902
1903 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1473 BUG_ON(!obj->active); 1904 BUG_ON(!obj->active);
1474 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
1475
1476 i915_gem_object_move_off_active(obj);
1477}
1478 1905
1479static void 1906 if (obj->pin_count) /* are we a framebuffer? */
1480i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) 1907 intel_mark_fb_idle(obj);
1481{
1482 struct drm_device *dev = obj->base.dev;
1483 struct drm_i915_private *dev_priv = dev->dev_private;
1484 1908
1485 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 1909 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1486 1910
1487 BUG_ON(!list_empty(&obj->gpu_write_list)); 1911 list_del_init(&obj->ring_list);
1488 BUG_ON(!obj->active);
1489 obj->ring = NULL; 1912 obj->ring = NULL;
1490 1913
1491 i915_gem_object_move_off_active(obj); 1914 obj->last_read_seqno = 0;
1915 obj->last_write_seqno = 0;
1916 obj->base.write_domain = 0;
1917
1918 obj->last_fenced_seqno = 0;
1492 obj->fenced_gpu_access = false; 1919 obj->fenced_gpu_access = false;
1493 1920
1494 obj->active = 0; 1921 obj->active = 0;
1495 obj->pending_gpu_write = false;
1496 drm_gem_object_unreference(&obj->base); 1922 drm_gem_object_unreference(&obj->base);
1497 1923
1498 WARN_ON(i915_verify_lists(dev)); 1924 WARN_ON(i915_verify_lists(dev));
1499} 1925}
1500 1926
1501/* Immediately discard the backing storage */
1502static void
1503i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1504{
1505 struct inode *inode;
1506
1507 /* Our goal here is to return as much of the memory as
1508 * is possible back to the system as we are called from OOM.
1509 * To do this we must instruct the shmfs to drop all of its
1510 * backing pages, *now*.
1511 */
1512 inode = obj->base.filp->f_path.dentry->d_inode;
1513 shmem_truncate_range(inode, 0, (loff_t)-1);
1514
1515 if (obj->base.map_list.map)
1516 drm_gem_free_mmap_offset(&obj->base);
1517
1518 obj->madv = __I915_MADV_PURGED;
1519}
1520
1521static inline int
1522i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1523{
1524 return obj->madv == I915_MADV_DONTNEED;
1525}
1526
1527static void
1528i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
1529 uint32_t flush_domains)
1530{
1531 struct drm_i915_gem_object *obj, *next;
1532
1533 list_for_each_entry_safe(obj, next,
1534 &ring->gpu_write_list,
1535 gpu_write_list) {
1536 if (obj->base.write_domain & flush_domains) {
1537 uint32_t old_write_domain = obj->base.write_domain;
1538
1539 obj->base.write_domain = 0;
1540 list_del_init(&obj->gpu_write_list);
1541 i915_gem_object_move_to_active(obj, ring,
1542 i915_gem_next_request_seqno(ring));
1543
1544 trace_i915_gem_object_change_domain(obj,
1545 obj->base.read_domains,
1546 old_write_domain);
1547 }
1548 }
1549}
1550
1551static u32 1927static u32
1552i915_gem_get_seqno(struct drm_device *dev) 1928i915_gem_get_seqno(struct drm_device *dev)
1553{ 1929{
@@ -1588,15 +1964,16 @@ i915_add_request(struct intel_ring_buffer *ring,
1588 * is that the flush _must_ happen before the next request, no matter 1964 * is that the flush _must_ happen before the next request, no matter
1589 * what. 1965 * what.
1590 */ 1966 */
1591 if (ring->gpu_caches_dirty) { 1967 ret = intel_ring_flush_all_caches(ring);
1592 ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS); 1968 if (ret)
1593 if (ret) 1969 return ret;
1594 return ret;
1595 1970
1596 ring->gpu_caches_dirty = false; 1971 if (request == NULL) {
1972 request = kmalloc(sizeof(*request), GFP_KERNEL);
1973 if (request == NULL)
1974 return -ENOMEM;
1597 } 1975 }
1598 1976
1599 BUG_ON(request == NULL);
1600 seqno = i915_gem_next_request_seqno(ring); 1977 seqno = i915_gem_next_request_seqno(ring);
1601 1978
1602 /* Record the position of the start of the request so that 1979 /* Record the position of the start of the request so that
@@ -1607,8 +1984,10 @@ i915_add_request(struct intel_ring_buffer *ring,
1607 request_ring_position = intel_ring_get_tail(ring); 1984 request_ring_position = intel_ring_get_tail(ring);
1608 1985
1609 ret = ring->add_request(ring, &seqno); 1986 ret = ring->add_request(ring, &seqno);
1610 if (ret) 1987 if (ret) {
1611 return ret; 1988 kfree(request);
1989 return ret;
1990 }
1612 1991
1613 trace_i915_gem_request_add(ring, seqno); 1992 trace_i915_gem_request_add(ring, seqno);
1614 1993
@@ -1618,6 +1997,7 @@ i915_add_request(struct intel_ring_buffer *ring,
1618 request->emitted_jiffies = jiffies; 1997 request->emitted_jiffies = jiffies;
1619 was_empty = list_empty(&ring->request_list); 1998 was_empty = list_empty(&ring->request_list);
1620 list_add_tail(&request->list, &ring->request_list); 1999 list_add_tail(&request->list, &ring->request_list);
2000 request->file_priv = NULL;
1621 2001
1622 if (file) { 2002 if (file) {
1623 struct drm_i915_file_private *file_priv = file->driver_priv; 2003 struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -1637,13 +2017,13 @@ i915_add_request(struct intel_ring_buffer *ring,
1637 jiffies + 2017 jiffies +
1638 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 2018 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
1639 } 2019 }
1640 if (was_empty) 2020 if (was_empty) {
1641 queue_delayed_work(dev_priv->wq, 2021 queue_delayed_work(dev_priv->wq,
1642 &dev_priv->mm.retire_work, HZ); 2022 &dev_priv->mm.retire_work, HZ);
2023 intel_mark_busy(dev_priv->dev);
2024 }
1643 } 2025 }
1644 2026
1645 WARN_ON(!list_empty(&ring->gpu_write_list));
1646
1647 return 0; 2027 return 0;
1648} 2028}
1649 2029
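i915_add_request() now allocates the request itself when the caller passes NULL, removing the kzalloc boilerplate from every call site. Emitting a flush-and-request becomes a one-liner, as the retire-work and check_olr hunks below show (sketch):

	/* fire-and-forget: flush caches and emit a request */
	i915_add_request(ring, NULL, NULL);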
@@ -1685,8 +2065,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1685 struct drm_i915_gem_object, 2065 struct drm_i915_gem_object,
1686 ring_list); 2066 ring_list);
1687 2067
1688 obj->base.write_domain = 0;
1689 list_del_init(&obj->gpu_write_list);
1690 i915_gem_object_move_to_inactive(obj); 2068 i915_gem_object_move_to_inactive(obj);
1691 } 2069 }
1692} 2070}
@@ -1722,20 +2100,6 @@ void i915_gem_reset(struct drm_device *dev)
1722 for_each_ring(ring, dev_priv, i) 2100 for_each_ring(ring, dev_priv, i)
1723 i915_gem_reset_ring_lists(dev_priv, ring); 2101 i915_gem_reset_ring_lists(dev_priv, ring);
1724 2102
1725 /* Remove anything from the flushing lists. The GPU cache is likely
1726 * to be lost on reset along with the data, so simply move the
1727 * lost bo to the inactive list.
1728 */
1729 while (!list_empty(&dev_priv->mm.flushing_list)) {
1730 obj = list_first_entry(&dev_priv->mm.flushing_list,
1731 struct drm_i915_gem_object,
1732 mm_list);
1733
1734 obj->base.write_domain = 0;
1735 list_del_init(&obj->gpu_write_list);
1736 i915_gem_object_move_to_inactive(obj);
1737 }
1738
1739 /* Move everything out of the GPU domains to ensure we do any 2103 /* Move everything out of the GPU domains to ensure we do any
1740 * necessary invalidation upon reuse. 2104 * necessary invalidation upon reuse.
1741 */ 2105 */
@@ -1764,7 +2128,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1764 2128
1765 WARN_ON(i915_verify_lists(ring->dev)); 2129 WARN_ON(i915_verify_lists(ring->dev));
1766 2130
1767 seqno = ring->get_seqno(ring); 2131 seqno = ring->get_seqno(ring, true);
1768 2132
1769 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) 2133 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
1770 if (seqno >= ring->sync_seqno[i]) 2134 if (seqno >= ring->sync_seqno[i])
@@ -1803,13 +2167,10 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
1803 struct drm_i915_gem_object, 2167 struct drm_i915_gem_object,
1804 ring_list); 2168 ring_list);
1805 2169
1806 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno)) 2170 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
1807 break; 2171 break;
1808 2172
1809 if (obj->base.write_domain != 0) 2173 i915_gem_object_move_to_inactive(obj);
1810 i915_gem_object_move_to_flushing(obj);
1811 else
1812 i915_gem_object_move_to_inactive(obj);
1813 } 2174 }
1814 2175
1815 if (unlikely(ring->trace_irq_seqno && 2176 if (unlikely(ring->trace_irq_seqno &&
@@ -1858,216 +2219,20 @@ i915_gem_retire_work_handler(struct work_struct *work)
1858 */ 2219 */
1859 idle = true; 2220 idle = true;
1860 for_each_ring(ring, dev_priv, i) { 2221 for_each_ring(ring, dev_priv, i) {
1861 if (ring->gpu_caches_dirty) { 2222 if (ring->gpu_caches_dirty)
1862 struct drm_i915_gem_request *request; 2223 i915_add_request(ring, NULL, NULL);
1863
1864 request = kzalloc(sizeof(*request), GFP_KERNEL);
1865 if (request == NULL ||
1866 i915_add_request(ring, NULL, request))
1867 kfree(request);
1868 }
1869 2224
1870 idle &= list_empty(&ring->request_list); 2225 idle &= list_empty(&ring->request_list);
1871 } 2226 }
1872 2227
1873 if (!dev_priv->mm.suspended && !idle) 2228 if (!dev_priv->mm.suspended && !idle)
1874 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 2229 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
2230 if (idle)
2231 intel_mark_idle(dev);
1875 2232
1876 mutex_unlock(&dev->struct_mutex); 2233 mutex_unlock(&dev->struct_mutex);
1877} 2234}
1878 2235
1879int
1880i915_gem_check_wedge(struct drm_i915_private *dev_priv,
1881 bool interruptible)
1882{
1883 if (atomic_read(&dev_priv->mm.wedged)) {
1884 struct completion *x = &dev_priv->error_completion;
1885 bool recovery_complete;
1886 unsigned long flags;
1887
1888 /* Give the error handler a chance to run. */
1889 spin_lock_irqsave(&x->wait.lock, flags);
1890 recovery_complete = x->done > 0;
1891 spin_unlock_irqrestore(&x->wait.lock, flags);
1892
1893 /* Non-interruptible callers can't handle -EAGAIN, hence return
1894 * -EIO unconditionally for these. */
1895 if (!interruptible)
1896 return -EIO;
1897
1898 /* Recovery complete, but still wedged means reset failure. */
1899 if (recovery_complete)
1900 return -EIO;
1901
1902 return -EAGAIN;
1903 }
1904
1905 return 0;
1906}
1907
1908/*
1909 * Compare seqno against outstanding lazy request. Emit a request if they are
1910 * equal.
1911 */
1912static int
1913i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1914{
1915 int ret = 0;
1916
1917 BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1918
1919 if (seqno == ring->outstanding_lazy_request) {
1920 struct drm_i915_gem_request *request;
1921
1922 request = kzalloc(sizeof(*request), GFP_KERNEL);
1923 if (request == NULL)
1924 return -ENOMEM;
1925
1926 ret = i915_add_request(ring, NULL, request);
1927 if (ret) {
1928 kfree(request);
1929 return ret;
1930 }
1931
1932 BUG_ON(seqno != request->seqno);
1933 }
1934
1935 return ret;
1936}
1937
1938/**
1939 * __wait_seqno - wait until execution of seqno has finished
1940 * @ring: the ring expected to report seqno
1941 * @seqno: duh!
1942 * @interruptible: do an interruptible wait (normally yes)
1943 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1944 *
1945 * Returns 0 if the seqno was found within the alloted time. Else returns the
1946 * errno with remaining time filled in timeout argument.
1947 */
1948static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1949 bool interruptible, struct timespec *timeout)
1950{
1951 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1952 struct timespec before, now, wait_time={1,0};
1953 unsigned long timeout_jiffies;
1954 long end;
1955 bool wait_forever = true;
1956 int ret;
1957
1958 if (i915_seqno_passed(ring->get_seqno(ring), seqno))
1959 return 0;
1960
1961 trace_i915_gem_request_wait_begin(ring, seqno);
1962
1963 if (timeout != NULL) {
1964 wait_time = *timeout;
1965 wait_forever = false;
1966 }
1967
1968 timeout_jiffies = timespec_to_jiffies(&wait_time);
1969
1970 if (WARN_ON(!ring->irq_get(ring)))
1971 return -ENODEV;
1972
1973 /* Record current time in case interrupted by signal, or wedged * */
1974 getrawmonotonic(&before);
1975
1976#define EXIT_COND \
1977 (i915_seqno_passed(ring->get_seqno(ring), seqno) || \
1978 atomic_read(&dev_priv->mm.wedged))
1979 do {
1980 if (interruptible)
1981 end = wait_event_interruptible_timeout(ring->irq_queue,
1982 EXIT_COND,
1983 timeout_jiffies);
1984 else
1985 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1986 timeout_jiffies);
1987
1988 ret = i915_gem_check_wedge(dev_priv, interruptible);
1989 if (ret)
1990 end = ret;
1991 } while (end == 0 && wait_forever);
1992
1993 getrawmonotonic(&now);
1994
1995 ring->irq_put(ring);
1996 trace_i915_gem_request_wait_end(ring, seqno);
1997#undef EXIT_COND
1998
1999 if (timeout) {
2000 struct timespec sleep_time = timespec_sub(now, before);
2001 *timeout = timespec_sub(*timeout, sleep_time);
2002 }
2003
2004 switch (end) {
2005 case -EIO:
2006 case -EAGAIN: /* Wedged */
2007 case -ERESTARTSYS: /* Signal */
2008 return (int)end;
2009 case 0: /* Timeout */
2010 if (timeout)
2011 set_normalized_timespec(timeout, 0, 0);
2012 return -ETIME;
2013 default: /* Completed */
2014 WARN_ON(end < 0); /* We're not aware of other errors */
2015 return 0;
2016 }
2017}
2018
2019/**
2020 * Waits for a sequence number to be signaled, and cleans up the
2021 * request and object lists appropriately for that event.
2022 */
2023int
2024i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
2025{
2026 drm_i915_private_t *dev_priv = ring->dev->dev_private;
2027 int ret = 0;
2028
2029 BUG_ON(seqno == 0);
2030
2031 ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
2032 if (ret)
2033 return ret;
2034
2035 ret = i915_gem_check_olr(ring, seqno);
2036 if (ret)
2037 return ret;
2038
2039 ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
2040
2041 return ret;
2042}
2043
2044/**
2045 * Ensures that all rendering to the object has completed and the object is
2046 * safe to unbind from the GTT or access from the CPU.
2047 */
2048int
2049i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
2050{
2051 int ret;
2052
2053 /* This function only exists to support waiting for existing rendering,
2054 * not for emitting required flushes.
2055 */
2056 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
2057
2058 /* If there is rendering queued on the buffer being evicted, wait for
2059 * it.
2060 */
2061 if (obj->active) {
2062 ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
2063 if (ret)
2064 return ret;
2065 i915_gem_retire_requests_ring(obj->ring);
2066 }
2067
2068 return 0;
2069}
2070
2071/** 2236/**
2072 * Ensures that an object will eventually get non-busy by flushing any required 2237 * Ensures that an object will eventually get non-busy by flushing any required
2073 * write domains, emitting any outstanding lazy request and retiring and 2238 * write domains, emitting any outstanding lazy request and retiring and
@@ -2079,14 +2244,10 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2079 int ret; 2244 int ret;
2080 2245
2081 if (obj->active) { 2246 if (obj->active) {
2082 ret = i915_gem_object_flush_gpu_write_domain(obj); 2247 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2083 if (ret) 2248 if (ret)
2084 return ret; 2249 return ret;
2085 2250
2086 ret = i915_gem_check_olr(obj->ring,
2087 obj->last_rendering_seqno);
2088 if (ret)
2089 return ret;
2090 i915_gem_retire_requests_ring(obj->ring); 2251 i915_gem_retire_requests_ring(obj->ring);
2091 } 2252 }
2092 2253
@@ -2146,7 +2307,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2146 goto out; 2307 goto out;
2147 2308
2148 if (obj->active) { 2309 if (obj->active) {
2149 seqno = obj->last_rendering_seqno; 2310 seqno = obj->last_read_seqno;
2150 ring = obj->ring; 2311 ring = obj->ring;
2151 } 2312 }
2152 2313
@@ -2201,11 +2362,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
2201 return 0; 2362 return 0;
2202 2363
2203 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev)) 2364 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2204 return i915_gem_object_wait_rendering(obj); 2365 return i915_gem_object_wait_rendering(obj, false);
2205 2366
2206 idx = intel_ring_sync_index(from, to); 2367 idx = intel_ring_sync_index(from, to);
2207 2368
2208 seqno = obj->last_rendering_seqno; 2369 seqno = obj->last_read_seqno;
2209 if (seqno <= from->sync_seqno[idx]) 2370 if (seqno <= from->sync_seqno[idx])
2210 return 0; 2371 return 0;
2211 2372
@@ -2259,6 +2420,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2259 if (obj->pin_count) 2420 if (obj->pin_count)
2260 return -EBUSY; 2421 return -EBUSY;
2261 2422
2423 BUG_ON(obj->pages == NULL);
2424
2262 ret = i915_gem_object_finish_gpu(obj); 2425 ret = i915_gem_object_finish_gpu(obj);
2263 if (ret) 2426 if (ret)
2264 return ret; 2427 return ret;
@@ -2269,22 +2432,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2269 2432
2270 i915_gem_object_finish_gtt(obj); 2433 i915_gem_object_finish_gtt(obj);
2271 2434
2272 /* Move the object to the CPU domain to ensure that
2273 * any possible CPU writes while it's not in the GTT
2274 * are flushed when we go to remap it.
2275 */
2276 if (ret == 0)
2277 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
2278 if (ret == -ERESTARTSYS)
2279 return ret;
2280 if (ret) {
2281 /* In the event of a disaster, abandon all caches and
2282 * hope for the best.
2283 */
2284 i915_gem_clflush_object(obj);
2285 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2286 }
2287
2288 /* release the fence reg _after_ flushing */ 2435 /* release the fence reg _after_ flushing */
2289 ret = i915_gem_object_put_fence(obj); 2436 ret = i915_gem_object_put_fence(obj);
2290 if (ret) 2437 if (ret)
@@ -2300,10 +2447,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2300 } 2447 }
2301 i915_gem_gtt_finish_object(obj); 2448 i915_gem_gtt_finish_object(obj);
2302 2449
2303 i915_gem_object_put_pages_gtt(obj); 2450 list_del(&obj->mm_list);
2304 2451 list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
2305 list_del_init(&obj->gtt_list);
2306 list_del_init(&obj->mm_list);
2307 /* Avoid an unnecessary call to unbind on rebind. */ 2452 /* Avoid an unnecessary call to unbind on rebind. */
2308 obj->map_and_fenceable = true; 2453 obj->map_and_fenceable = true;
2309 2454
@@ -2311,48 +2456,14 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2311 obj->gtt_space = NULL; 2456 obj->gtt_space = NULL;
2312 obj->gtt_offset = 0; 2457 obj->gtt_offset = 0;
2313 2458
2314 if (i915_gem_object_is_purgeable(obj))
2315 i915_gem_object_truncate(obj);
2316
2317 return ret;
2318}
2319
2320int
2321i915_gem_flush_ring(struct intel_ring_buffer *ring,
2322 uint32_t invalidate_domains,
2323 uint32_t flush_domains)
2324{
2325 int ret;
2326
2327 if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2328 return 0;
2329
2330 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2331
2332 ret = ring->flush(ring, invalidate_domains, flush_domains);
2333 if (ret)
2334 return ret;
2335
2336 if (flush_domains & I915_GEM_GPU_DOMAINS)
2337 i915_gem_process_flushing_list(ring, flush_domains);
2338
2339 return 0; 2459 return 0;
2340} 2460}
2341 2461
2342static int i915_ring_idle(struct intel_ring_buffer *ring) 2462static int i915_ring_idle(struct intel_ring_buffer *ring)
2343{ 2463{
2344 int ret; 2464 if (list_empty(&ring->active_list))
2345
2346 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2347 return 0; 2465 return 0;
2348 2466
2349 if (!list_empty(&ring->gpu_write_list)) {
2350 ret = i915_gem_flush_ring(ring,
2351 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2352 if (ret)
2353 return ret;
2354 }
2355
2356 return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring)); 2467 return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
2357} 2468}
2358 2469
@@ -2371,10 +2482,6 @@ int i915_gpu_idle(struct drm_device *dev)
2371 ret = i915_ring_idle(ring); 2482 ret = i915_ring_idle(ring);
2372 if (ret) 2483 if (ret)
2373 return ret; 2484 return ret;
2374
2375 /* Is the device fubar? */
2376 if (WARN_ON(!list_empty(&ring->gpu_write_list)))
2377 return -EBUSY;
2378 } 2485 }
2379 2486
2380 return 0; 2487 return 0;
@@ -2547,21 +2654,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2547static int 2654static int
2548i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) 2655i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2549{ 2656{
2550 int ret;
2551
2552 if (obj->fenced_gpu_access) {
2553 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2554 ret = i915_gem_flush_ring(obj->ring,
2555 0, obj->base.write_domain);
2556 if (ret)
2557 return ret;
2558 }
2559
2560 obj->fenced_gpu_access = false;
2561 }
2562
2563 if (obj->last_fenced_seqno) { 2657 if (obj->last_fenced_seqno) {
2564 ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); 2658 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2565 if (ret) 2659 if (ret)
2566 return ret; 2660 return ret;
2567 2661
@@ -2574,6 +2668,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2574 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) 2668 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2575 mb(); 2669 mb();
2576 2670
2671 obj->fenced_gpu_access = false;
2577 return 0; 2672 return 0;
2578} 2673}
2579 2674
@@ -2693,18 +2788,88 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2693 return 0; 2788 return 0;
2694} 2789}
2695 2790
2791static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2792 struct drm_mm_node *gtt_space,
2793 unsigned long cache_level)
2794{
2795 struct drm_mm_node *other;
2796
2797 /* On non-LLC machines we have to be careful when putting differing
2798 * types of snoopable memory together to avoid the prefetcher
2799 * crossing memory domains and dying.
2800 */
2801 if (HAS_LLC(dev))
2802 return true;
2803
2804 if (gtt_space == NULL)
2805 return true;
2806
2807 if (list_empty(&gtt_space->node_list))
2808 return true;
2809
2810 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2811 if (other->allocated && !other->hole_follows && other->color != cache_level)
2812 return false;
2813
2814 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2815 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2816 return false;
2817
2818 return true;
2819}
2820
2821static void i915_gem_verify_gtt(struct drm_device *dev)
2822{
2823#if WATCH_GTT
2824 struct drm_i915_private *dev_priv = dev->dev_private;
2825 struct drm_i915_gem_object *obj;
2826 int err = 0;
2827
2828 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
2829 if (obj->gtt_space == NULL) {
2830 printk(KERN_ERR "object found on GTT list with no space reserved\n");
2831 err++;
2832 continue;
2833 }
2834
2835 if (obj->cache_level != obj->gtt_space->color) {
2836 printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
2837 obj->gtt_space->start,
2838 obj->gtt_space->start + obj->gtt_space->size,
2839 obj->cache_level,
2840 obj->gtt_space->color);
2841 err++;
2842 continue;
2843 }
2844
2845 if (!i915_gem_valid_gtt_space(dev,
2846 obj->gtt_space,
2847 obj->cache_level)) {
2848 printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
2849 obj->gtt_space->start,
2850 obj->gtt_space->start + obj->gtt_space->size,
2851 obj->cache_level);
2852 err++;
2853 continue;
2854 }
2855 }
2856
2857 WARN_ON(err);
2858#endif
2859}
2860
2696/** 2861/**
2697 * Finds free space in the GTT aperture and binds the object there. 2862 * Finds free space in the GTT aperture and binds the object there.
2698 */ 2863 */
2699static int 2864static int
2700i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 2865i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2701 unsigned alignment, 2866 unsigned alignment,
2702 bool map_and_fenceable) 2867 bool map_and_fenceable,
2868 bool nonblocking)
2703{ 2869{
2704 struct drm_device *dev = obj->base.dev; 2870 struct drm_device *dev = obj->base.dev;
2705 drm_i915_private_t *dev_priv = dev->dev_private; 2871 drm_i915_private_t *dev_priv = dev->dev_private;
2706 struct drm_mm_node *free_space; 2872 struct drm_mm_node *free_space;
2707 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2708 u32 size, fence_size, fence_alignment, unfenced_alignment; 2873 u32 size, fence_size, fence_alignment, unfenced_alignment;
2709 bool mappable, fenceable; 2874 bool mappable, fenceable;
2710 int ret; 2875 int ret;
@@ -2744,89 +2909,67 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2744 return -E2BIG; 2909 return -E2BIG;
2745 } 2910 }
2746 2911
2912 ret = i915_gem_object_get_pages(obj);
2913 if (ret)
2914 return ret;
2915
2747 search_free: 2916 search_free:
2748 if (map_and_fenceable) 2917 if (map_and_fenceable)
2749 free_space = 2918 free_space =
2750 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, 2919 drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
2751 size, alignment, 2920 size, alignment, obj->cache_level,
2752 0, dev_priv->mm.gtt_mappable_end, 2921 0, dev_priv->mm.gtt_mappable_end,
2753 0); 2922 false);
2754 else 2923 else
2755 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, 2924 free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
2756 size, alignment, 0); 2925 size, alignment, obj->cache_level,
2926 false);
2757 2927
2758 if (free_space != NULL) { 2928 if (free_space != NULL) {
2759 if (map_and_fenceable) 2929 if (map_and_fenceable)
2760 obj->gtt_space = 2930 obj->gtt_space =
2761 drm_mm_get_block_range_generic(free_space, 2931 drm_mm_get_block_range_generic(free_space,
2762 size, alignment, 0, 2932 size, alignment, obj->cache_level,
2763 0, dev_priv->mm.gtt_mappable_end, 2933 0, dev_priv->mm.gtt_mappable_end,
2764 0); 2934 false);
2765 else 2935 else
2766 obj->gtt_space = 2936 obj->gtt_space =
2767 drm_mm_get_block(free_space, size, alignment); 2937 drm_mm_get_block_generic(free_space,
2938 size, alignment, obj->cache_level,
2939 false);
2768 } 2940 }
2769 if (obj->gtt_space == NULL) { 2941 if (obj->gtt_space == NULL) {
2770 /* If the gtt is empty and we're still having trouble
2771 * fitting our object in, we're out of memory.
2772 */
2773 ret = i915_gem_evict_something(dev, size, alignment, 2942 ret = i915_gem_evict_something(dev, size, alignment,
2774 map_and_fenceable); 2943 obj->cache_level,
2944 map_and_fenceable,
2945 nonblocking);
2775 if (ret) 2946 if (ret)
2776 return ret; 2947 return ret;
2777 2948
2778 goto search_free; 2949 goto search_free;
2779 } 2950 }
2780 2951 if (WARN_ON(!i915_gem_valid_gtt_space(dev,
2781 ret = i915_gem_object_get_pages_gtt(obj, gfpmask); 2952 obj->gtt_space,
2782 if (ret) { 2953 obj->cache_level))) {
2783 drm_mm_put_block(obj->gtt_space); 2954 drm_mm_put_block(obj->gtt_space);
2784 obj->gtt_space = NULL; 2955 obj->gtt_space = NULL;
2785 2956 return -EINVAL;
2786 if (ret == -ENOMEM) {
2787 /* first try to reclaim some memory by clearing the GTT */
2788 ret = i915_gem_evict_everything(dev, false);
2789 if (ret) {
2790 /* now try to shrink everyone else */
2791 if (gfpmask) {
2792 gfpmask = 0;
2793 goto search_free;
2794 }
2795
2796 return -ENOMEM;
2797 }
2798
2799 goto search_free;
2800 }
2801
2802 return ret;
2803 } 2957 }
2804 2958
2959
2805 ret = i915_gem_gtt_prepare_object(obj); 2960 ret = i915_gem_gtt_prepare_object(obj);
2806 if (ret) { 2961 if (ret) {
2807 i915_gem_object_put_pages_gtt(obj);
2808 drm_mm_put_block(obj->gtt_space); 2962 drm_mm_put_block(obj->gtt_space);
2809 obj->gtt_space = NULL; 2963 obj->gtt_space = NULL;
2810 2964 return ret;
2811 if (i915_gem_evict_everything(dev, false))
2812 return ret;
2813
2814 goto search_free;
2815 } 2965 }
2816 2966
2817 if (!dev_priv->mm.aliasing_ppgtt) 2967 if (!dev_priv->mm.aliasing_ppgtt)
2818 i915_gem_gtt_bind_object(obj, obj->cache_level); 2968 i915_gem_gtt_bind_object(obj, obj->cache_level);
2819 2969
2820 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); 2970 list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
2821 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 2971 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2822 2972
2823 /* Assert that the object is not currently in any GPU domain. As it
2824 * wasn't in the GTT, there shouldn't be any way it could have been in
2825 * a GPU cache
2826 */
2827 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2828 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2829
2830 obj->gtt_offset = obj->gtt_space->start; 2973 obj->gtt_offset = obj->gtt_space->start;
2831 2974
2832 fenceable = 2975 fenceable =
@@ -2839,6 +2982,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2839 obj->map_and_fenceable = mappable && fenceable; 2982 obj->map_and_fenceable = mappable && fenceable;
2840 2983
2841 trace_i915_gem_object_bind(obj, map_and_fenceable); 2984 trace_i915_gem_object_bind(obj, map_and_fenceable);
2985 i915_gem_verify_gtt(dev);
2842 return 0; 2986 return 0;
2843} 2987}
2844 2988
@@ -2865,18 +3009,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2865 3009
2866 trace_i915_gem_object_clflush(obj); 3010 trace_i915_gem_object_clflush(obj);
2867 3011
2868 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); 3012 drm_clflush_sg(obj->pages);
2869}
2870
2871/** Flushes any GPU write domain for the object if it's dirty. */
2872static int
2873i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2874{
2875 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2876 return 0;
2877
2878 /* Queue the GPU write cache flushing we need. */
2879 return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
2880} 3013}
2881 3014
2882/** Flushes the GTT write domain for the object if it's dirty. */ 3015/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2945,16 +3078,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
2945 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3078 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
2946 return 0; 3079 return 0;
2947 3080
2948 ret = i915_gem_object_flush_gpu_write_domain(obj); 3081 ret = i915_gem_object_wait_rendering(obj, !write);
2949 if (ret) 3082 if (ret)
2950 return ret; 3083 return ret;
2951 3084
2952 if (obj->pending_gpu_write || write) {
2953 ret = i915_gem_object_wait_rendering(obj);
2954 if (ret)
2955 return ret;
2956 }
2957
2958 i915_gem_object_flush_cpu_write_domain(obj); 3085 i915_gem_object_flush_cpu_write_domain(obj);
2959 3086
2960 old_write_domain = obj->base.write_domain; 3087 old_write_domain = obj->base.write_domain;
@@ -2997,6 +3124,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2997 return -EBUSY; 3124 return -EBUSY;
2998 } 3125 }
2999 3126
3127 if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3128 ret = i915_gem_object_unbind(obj);
3129 if (ret)
3130 return ret;
3131 }
3132
3000 if (obj->gtt_space) { 3133 if (obj->gtt_space) {
3001 ret = i915_gem_object_finish_gpu(obj); 3134 ret = i915_gem_object_finish_gpu(obj);
3002 if (ret) 3135 if (ret)
@@ -3008,7 +3141,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3008 * registers with snooped memory, so relinquish any fences 3141 * registers with snooped memory, so relinquish any fences
3009 * currently pointing to our region in the aperture. 3142 * currently pointing to our region in the aperture.
3010 */ 3143 */
3011 if (INTEL_INFO(obj->base.dev)->gen < 6) { 3144 if (INTEL_INFO(dev)->gen < 6) {
3012 ret = i915_gem_object_put_fence(obj); 3145 ret = i915_gem_object_put_fence(obj);
3013 if (ret) 3146 if (ret)
3014 return ret; 3147 return ret;
@@ -3019,6 +3152,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3019 if (obj->has_aliasing_ppgtt_mapping) 3152 if (obj->has_aliasing_ppgtt_mapping)
3020 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, 3153 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3021 obj, cache_level); 3154 obj, cache_level);
3155
3156 obj->gtt_space->color = cache_level;
3022 } 3157 }
3023 3158
3024 if (cache_level == I915_CACHE_NONE) { 3159 if (cache_level == I915_CACHE_NONE) {
@@ -3045,9 +3180,72 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3045 } 3180 }
3046 3181
3047 obj->cache_level = cache_level; 3182 obj->cache_level = cache_level;
3183 i915_gem_verify_gtt(dev);
3048 return 0; 3184 return 0;
3049} 3185}
3050 3186
3187int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3188 struct drm_file *file)
3189{
3190 struct drm_i915_gem_caching *args = data;
3191 struct drm_i915_gem_object *obj;
3192 int ret;
3193
3194 ret = i915_mutex_lock_interruptible(dev);
3195 if (ret)
3196 return ret;
3197
3198 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3199 if (&obj->base == NULL) {
3200 ret = -ENOENT;
3201 goto unlock;
3202 }
3203
3204 args->caching = obj->cache_level != I915_CACHE_NONE;
3205
3206 drm_gem_object_unreference(&obj->base);
3207unlock:
3208 mutex_unlock(&dev->struct_mutex);
3209 return ret;
3210}
3211
3212int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3213 struct drm_file *file)
3214{
3215 struct drm_i915_gem_caching *args = data;
3216 struct drm_i915_gem_object *obj;
3217 enum i915_cache_level level;
3218 int ret;
3219
3220 ret = i915_mutex_lock_interruptible(dev);
3221 if (ret)
3222 return ret;
3223
3224 switch (args->caching) {
3225 case I915_CACHING_NONE:
3226 level = I915_CACHE_NONE;
3227 break;
3228 case I915_CACHING_CACHED:
3229 level = I915_CACHE_LLC;
3230 break;
3231 default:
3232 return -EINVAL;
3233 }
3234
3235 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3236 if (&obj->base == NULL) {
3237 ret = -ENOENT;
3238 goto unlock;
3239 }
3240
3241 ret = i915_gem_object_set_cache_level(obj, level);
3242
3243 drm_gem_object_unreference(&obj->base);
3244unlock:
3245 mutex_unlock(&dev->struct_mutex);
3246 return ret;
3247}
3248
3051/* 3249/*
3052 * Prepare buffer for display plane (scanout, cursors, etc). 3250 * Prepare buffer for display plane (scanout, cursors, etc).
3053 * Can be called from an uninterruptible phase (modesetting) and allows 3251 * Can be called from an uninterruptible phase (modesetting) and allows
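
The two ioctls above are thin uapi wrappers: they translate I915_CACHING_NONE or I915_CACHING_CACHED into an i915_cache_level and defer to i915_gem_object_set_cache_level(). A minimal userspace sketch of driving them through libdrm, assuming the uapi names from i915_drm.h (the fd and handle plumbing is left to the caller):

	#include <stdint.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	/* Ask for LLC-cached (snooped) backing pages. */
	static int set_bo_cached(int fd, uint32_t handle)
	{
		struct drm_i915_gem_caching arg = {
			.handle = handle,
			.caching = I915_CACHING_CACHED, /* kernel maps this to I915_CACHE_LLC */
		};

		return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
	}

	/* 1 if the object is in any cached level, 0 if uncached, -1 on error. */
	static int bo_is_cached(int fd, uint32_t handle)
	{
		struct drm_i915_gem_caching arg = { .handle = handle };

		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg))
			return -1;
		return arg.caching != I915_CACHING_NONE;
	}

Note the get side reports only cached versus uncached, mirroring the args->caching assignment above.
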
@@ -3061,10 +3259,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	u32 old_read_domains, old_write_domain;
 	int ret;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret)
-		return ret;
-
 	if (pipelined != obj->ring) {
 		ret = i915_gem_object_sync(obj, pipelined);
 		if (ret)
@@ -3088,7 +3282,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
 	 * always use map_and_fenceable for all scanout buffers.
 	 */
-	ret = i915_gem_object_pin(obj, alignment, true);
+	ret = i915_gem_object_pin(obj, alignment, true, false);
 	if (ret)
 		return ret;
 
@@ -3100,7 +3294,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->base.write_domain = 0;
 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
 	trace_i915_gem_object_change_domain(obj,
@@ -3118,13 +3312,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
 	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
-	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-		ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
-		if (ret)
-			return ret;
-	}
-
-	ret = i915_gem_object_wait_rendering(obj);
+	ret = i915_gem_object_wait_rendering(obj, false);
 	if (ret)
 		return ret;
 
@@ -3148,16 +3336,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
 		return 0;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_wait_rendering(obj, !write);
 	if (ret)
 		return ret;
 
-	if (write || obj->pending_gpu_write) {
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret)
-			return ret;
-	}
-
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	old_write_domain = obj->base.write_domain;
@@ -3237,7 +3419,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    uint32_t alignment,
-		    bool map_and_fenceable)
+		    bool map_and_fenceable,
+		    bool nonblocking)
 {
 	int ret;
 
@@ -3262,7 +3445,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 
 	if (obj->gtt_space == NULL) {
 		ret = i915_gem_object_bind_to_gtt(obj, alignment,
-						  map_and_fenceable);
+						  map_and_fenceable,
+						  nonblocking);
 		if (ret)
 			return ret;
 	}
@@ -3320,7 +3504,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	obj->user_pin_count++;
 	obj->pin_filp = file;
 	if (obj->user_pin_count == 1) {
-		ret = i915_gem_object_pin(obj, args->alignment, true);
+		ret = i915_gem_object_pin(obj, args->alignment, true, false);
 		if (ret)
 			goto out;
 	}
@@ -3400,6 +3584,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	ret = i915_gem_object_flush_active(obj);
 
 	args->busy = obj->active;
+	if (obj->ring) {
+		BUILD_BUG_ON(I915_NUM_RINGS > 16);
+		args->busy |= intel_ring_flag(obj->ring) << 16;
+	}
 
 	drm_gem_object_unreference(&obj->base);
 unlock:
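
The busy ioctl keeps its old boolean in bit 0 and, for an active object, now also reports the ring in the top 16 bits; the BUILD_BUG_ON guarantees the per-ring flag (assumed here to be 1 << ring id, as returned by intel_ring_flag()) fits in that half-word. A small userspace decoding sketch:

	#include <stdint.h>
	#include <stdio.h>

	static void decode_busy(uint32_t busy)
	{
		if (!(busy & 1)) {
			printf("object idle\n");
			return;
		}
		/* High word: mask of rings still referencing the object. */
		printf("object busy, ring mask 0x%04x\n", busy >> 16);
	}
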
@@ -3448,9 +3636,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	if (obj->madv != __I915_MADV_PURGED)
 		obj->madv = args->madv;
 
-	/* if the object is no longer bound, discard its backing storage */
-	if (i915_gem_object_is_purgeable(obj) &&
-	    obj->gtt_space == NULL)
+	/* if the object is no longer attached, discard its backing storage */
+	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
 		i915_gem_object_truncate(obj);
 
 	args->retained = obj->madv != __I915_MADV_PURGED;
@@ -3462,10 +3649,32 @@ unlock:
 	return ret;
 }
 
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+			  const struct drm_i915_gem_object_ops *ops)
+{
+	INIT_LIST_HEAD(&obj->mm_list);
+	INIT_LIST_HEAD(&obj->gtt_list);
+	INIT_LIST_HEAD(&obj->ring_list);
+	INIT_LIST_HEAD(&obj->exec_list);
+
+	obj->ops = ops;
+
+	obj->fence_reg = I915_FENCE_REG_NONE;
+	obj->madv = I915_MADV_WILLNEED;
+	/* Avoid an unnecessary call to unbind on the first bind. */
+	obj->map_and_fenceable = true;
+
+	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+	.get_pages = i915_gem_object_get_pages_gtt,
+	.put_pages = i915_gem_object_put_pages_gtt,
+};
+
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
 	u32 mask;
@@ -3489,7 +3698,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	mapping_set_gfp_mask(mapping, mask);
 
-	i915_gem_info_add_obj(dev_priv, size);
+	i915_gem_object_init(obj, &i915_gem_object_ops);
 
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -3511,17 +3720,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	} else
 		obj->cache_level = I915_CACHE_NONE;
 
-	obj->base.driver_private = NULL;
-	obj->fence_reg = I915_FENCE_REG_NONE;
-	INIT_LIST_HEAD(&obj->mm_list);
-	INIT_LIST_HEAD(&obj->gtt_list);
-	INIT_LIST_HEAD(&obj->ring_list);
-	INIT_LIST_HEAD(&obj->exec_list);
-	INIT_LIST_HEAD(&obj->gpu_write_list);
-	obj->madv = I915_MADV_WILLNEED;
-	/* Avoid an unnecessary call to unbind on the first bind. */
-	obj->map_and_fenceable = true;
-
 	return obj;
 }
 
@@ -3540,9 +3738,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
-	if (gem_obj->import_attach)
-		drm_prime_gem_destroy(gem_obj, obj->sg_table);
-
 	if (obj->phys_obj)
 		i915_gem_detach_phys_object(dev, obj);
 
@@ -3558,8 +3753,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		dev_priv->mm.interruptible = was_interruptible;
 	}
 
-	if (obj->base.map_list.map)
-		drm_gem_free_mmap_offset(&obj->base);
+	obj->pages_pin_count = 0;
+	i915_gem_object_put_pages(obj);
+	i915_gem_object_free_mmap_offset(obj);
+
+	BUG_ON(obj->pages);
+
+	if (obj->base.import_attach)
+		drm_prime_gem_destroy(&obj->base, NULL);
 
 	drm_gem_object_release(&obj->base);
 	i915_gem_info_remove_obj(dev_priv, obj->base.size);
@@ -3590,7 +3791,7 @@ i915_gem_idle(struct drm_device *dev)
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_gem_evict_everything(dev, false);
+		i915_gem_evict_everything(dev);
 
 	i915_gem_reset_fences(dev);
 
@@ -3891,7 +4092,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	}
 
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 	mutex_unlock(&dev->struct_mutex);
 
@@ -3939,7 +4139,6 @@ init_ring_lists(struct intel_ring_buffer *ring)
 {
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	INIT_LIST_HEAD(&ring->gpu_write_list);
 }
 
 void
@@ -3949,10 +4148,10 @@ i915_gem_load(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
-	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
+	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		init_ring_lists(&dev_priv->ring[i]);
 	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
@@ -4197,18 +4396,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 }
 
 static int
-i915_gpu_is_active(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int lists_empty;
-
-	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->mm.active_list);
-
-	return !lists_empty;
-}
-
-static int
 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
@@ -4216,60 +4403,27 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 			     struct drm_i915_private,
 			     mm.inactive_shrinker);
 	struct drm_device *dev = dev_priv->dev;
-	struct drm_i915_gem_object *obj, *next;
+	struct drm_i915_gem_object *obj;
 	int nr_to_scan = sc->nr_to_scan;
 	int cnt;
 
 	if (!mutex_trylock(&dev->struct_mutex))
 		return 0;
 
-	/* "fast-path" to count number of available objects */
-	if (nr_to_scan == 0) {
-		cnt = 0;
-		list_for_each_entry(obj,
-				    &dev_priv->mm.inactive_list,
-				    mm_list)
-			cnt++;
-		mutex_unlock(&dev->struct_mutex);
-		return cnt / 100 * sysctl_vfs_cache_pressure;
-	}
-
-rescan:
-	/* first scan for clean buffers */
-	i915_gem_retire_requests(dev);
-
-	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list,
-				 mm_list) {
-		if (i915_gem_object_is_purgeable(obj)) {
-			if (i915_gem_object_unbind(obj) == 0 &&
-			    --nr_to_scan == 0)
-				break;
-		}
+	if (nr_to_scan) {
+		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
+		if (nr_to_scan > 0)
+			i915_gem_shrink_all(dev_priv);
 	}
 
-	/* second pass, evict/count anything still on the inactive list */
 	cnt = 0;
-	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list,
-				 mm_list) {
-		if (nr_to_scan &&
-		    i915_gem_object_unbind(obj) == 0)
-			nr_to_scan--;
-		else
-			cnt++;
-	}
-
-	if (nr_to_scan && i915_gpu_is_active(dev)) {
-		/*
-		 * We are desperate for pages, so as a last resort, wait
-		 * for the GPU to finish and discard whatever we can.
-		 * This has a dramatic impact to reduce the number of
-		 * OOM-killer events whilst running the GPU aggressively.
-		 */
-		if (i915_gpu_idle(dev) == 0)
-			goto rescan;
-	}
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+		if (obj->pages_pin_count == 0)
+			cnt += obj->base.size >> PAGE_SHIFT;
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+			cnt += obj->base.size >> PAGE_SHIFT;
+
 	mutex_unlock(&dev->struct_mutex);
-	return cnt / 100 * sysctl_vfs_cache_pressure;
+	return cnt;
 }
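
The shrinker no longer unbinds inactive objects itself: scanning is delegated to i915_gem_purge() and, if that is not enough, i915_gem_shrink_all(), and the value returned to the VM is a count of reclaimable pages gathered from the new bound/unbound lists rather than an object count scaled by vfs_cache_pressure. A standalone model of the new accounting (structure and names invented for illustration):

	#include <stddef.h>

	#define MODEL_PAGE_SHIFT 12	/* 4 KiB pages */

	struct model_bo {
		size_t size;		/* bytes of shmemfs backing storage */
		int pin_count;		/* pinned into the GTT */
		int pages_pin_count;	/* pages held (dma-buf export, vmap, ...) */
	};

	static size_t count_reclaimable(const struct model_bo *unbound, size_t nu,
					const struct model_bo *bound, size_t nb)
	{
		size_t cnt = 0, i;

		/* Unbound objects: only a page pin blocks reclaim. */
		for (i = 0; i < nu; i++)
			if (unbound[i].pages_pin_count == 0)
				cnt += unbound[i].size >> MODEL_PAGE_SHIFT;

		/* Bound objects must also be unpinned from the GTT. */
		for (i = 0; i < nb; i++)
			if (bound[i].pin_count == 0 && bound[i].pages_pin_count == 0)
				cnt += bound[i].size >> MODEL_PAGE_SHIFT;

		return cnt;
	}
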
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a21c3dccf436..1eb48faf741b 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -97,8 +97,7 @@
 
 static struct i915_hw_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
-static int do_switch(struct drm_i915_gem_object *from_obj,
-		     struct i915_hw_context *to, u32 seqno);
+static int do_switch(struct i915_hw_context *to);
 
 static int get_context_size(struct drm_device *dev)
 {
@@ -113,7 +112,10 @@ static int get_context_size(struct drm_device *dev)
 		break;
 	case 7:
 		reg = I915_READ(GEN7_CXT_SIZE);
-		ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
+		if (IS_HASWELL(dev))
+			ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
+		else
+			ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
 		break;
 	default:
 		BUG();
@@ -219,20 +221,21 @@ static int create_default_context(struct drm_i915_private *dev_priv)
 	 * default context.
 	 */
 	dev_priv->ring[RCS].default_context = ctx;
-	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
-	if (ret) {
-		do_destroy(ctx);
-		return ret;
-	}
+	ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+	if (ret)
+		goto err_destroy;
 
-	ret = do_switch(NULL, ctx, 0);
-	if (ret) {
-		i915_gem_object_unpin(ctx->obj);
-		do_destroy(ctx);
-	} else {
-		DRM_DEBUG_DRIVER("Default HW context loaded\n");
-	}
-
+	ret = do_switch(ctx);
+	if (ret)
+		goto err_unpin;
+
+	DRM_DEBUG_DRIVER("Default HW context loaded\n");
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(ctx->obj);
+err_destroy:
+	do_destroy(ctx);
 	return ret;
 }
 
@@ -359,18 +362,19 @@ mi_set_context(struct intel_ring_buffer *ring,
 	return ret;
 }
 
-static int do_switch(struct drm_i915_gem_object *from_obj,
-		     struct i915_hw_context *to,
-		     u32 seqno)
+static int do_switch(struct i915_hw_context *to)
 {
-	struct intel_ring_buffer *ring = NULL;
+	struct intel_ring_buffer *ring = to->ring;
+	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
 	u32 hw_flags = 0;
 	int ret;
 
-	BUG_ON(to == NULL);
 	BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
 
-	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
+	if (from_obj == to->obj)
+		return 0;
+
+	ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
 	if (ret)
 		return ret;
 
@@ -393,7 +397,6 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
 	else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
 		hw_flags |= MI_FORCE_RESTORE;
 
-	ring = to->ring;
 	ret = mi_set_context(ring, to, hw_flags);
 	if (ret) {
 		i915_gem_object_unpin(to->obj);
@@ -407,6 +410,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from_obj != NULL) {
+		u32 seqno = i915_gem_next_request_seqno(ring);
 		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
 		i915_gem_object_move_to_active(from_obj, ring, seqno);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
@@ -417,7 +421,7 @@ static int do_switch(struct drm_i915_gem_object *from_obj,
 		 * swapped, but there is no way to do that yet.
 		 */
 		from_obj->dirty = 1;
-		BUG_ON(from_obj->ring != to->ring);
+		BUG_ON(from_obj->ring != ring);
 		i915_gem_object_unpin(from_obj);
 
 		drm_gem_object_unreference(&from_obj->base);
@@ -448,9 +452,7 @@ int i915_switch_context(struct intel_ring_buffer *ring,
 			int to_id)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct drm_i915_file_private *file_priv = NULL;
 	struct i915_hw_context *to;
-	struct drm_i915_gem_object *from_obj = ring->last_context_obj;
 
 	if (dev_priv->hw_contexts_disabled)
 		return 0;
@@ -458,21 +460,18 @@ int i915_switch_context(struct intel_ring_buffer *ring,
 	if (ring != &dev_priv->ring[RCS])
 		return 0;
 
-	if (file)
-		file_priv = file->driver_priv;
-
 	if (to_id == DEFAULT_CONTEXT_ID) {
 		to = ring->default_context;
 	} else {
-		to = i915_gem_context_get(file_priv, to_id);
+		if (file == NULL)
+			return -EINVAL;
+
+		to = i915_gem_context_get(file->driver_priv, to_id);
 		if (to == NULL)
 			return -ENOENT;
 	}
 
-	if (from_obj == to->obj)
-		return 0;
-
-	return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
+	return do_switch(to);
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
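
After this change do_switch() derives the ring and the previously loaded context from the target itself, and the from == to early-out lives inside it, so i915_switch_context() only has to resolve the destination. A toy model of the resulting control flow (types invented for illustration):

	struct model_ctx { int id; };
	struct model_ring { struct model_ctx *last_context; };

	static int model_do_switch(struct model_ring *ring, struct model_ctx *to)
	{
		if (ring->last_context == to)
			return 0;	/* the early-out now lives here */

		/* ... pin target, emit MI_SET_CONTEXT, retire the old one ... */
		ring->last_context = to;
		return 0;
	}
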
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index af199596e792..773ef77b6c22 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -28,35 +28,62 @@
 #include <linux/dma-buf.h>
 
 static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
 					     enum dma_data_direction dir)
 {
 	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
-	struct drm_device *dev = obj->base.dev;
-	int npages = obj->base.size / PAGE_SIZE;
-	struct sg_table *sg = NULL;
-	int ret;
-	int nents;
+	struct sg_table *st;
+	struct scatterlist *src, *dst;
+	int ret, i;
 
-	ret = i915_mutex_lock_interruptible(dev);
+	ret = i915_mutex_lock_interruptible(obj->base.dev);
 	if (ret)
 		return ERR_PTR(ret);
 
-	if (!obj->pages) {
-		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
-		if (ret)
-			goto out;
+	ret = i915_gem_object_get_pages(obj);
+	if (ret) {
+		st = ERR_PTR(ret);
+		goto out;
+	}
+
+	/* Copy sg so that we make an independent mapping */
+	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (st == NULL) {
+		st = ERR_PTR(-ENOMEM);
+		goto out;
 	}
 
-	/* link the pages into an SG then map the sg */
-	sg = drm_prime_pages_to_sg(obj->pages, npages);
-	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+	if (ret) {
+		kfree(st);
+		st = ERR_PTR(ret);
+		goto out;
+	}
+
+	src = obj->pages->sgl;
+	dst = st->sgl;
+	for (i = 0; i < obj->pages->nents; i++) {
+		sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+		dst = sg_next(dst);
+		src = sg_next(src);
+	}
+
+	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+		sg_free_table(st);
+		kfree(st);
+		st = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	i915_gem_object_pin_pages(obj);
+
 out:
-	mutex_unlock(&dev->struct_mutex);
-	return sg;
+	mutex_unlock(&obj->base.dev->struct_mutex);
+	return st;
 }
 
 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
-				   struct sg_table *sg, enum dma_data_direction dir)
+				   struct sg_table *sg,
+				   enum dma_data_direction dir)
 {
 	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
 	sg_free_table(sg);
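
map_dma_buf now clones the object's scatterlist instead of flattening a page array: every importer gets a private sg_table that references the same pages, so each attachment carries its own DMA mapping and can be unmapped independently of the exporter and of other importers. A minimal standalone model of the clone (simplified types, no sg chaining):

	#include <stdlib.h>
	#include <string.h>

	struct model_sg { void *page; unsigned int length; };
	struct model_sg_table { struct model_sg *ents; unsigned int nents; };

	static struct model_sg_table *clone_table(const struct model_sg_table *src)
	{
		struct model_sg_table *st = malloc(sizeof(*st));

		if (st == NULL)
			return NULL;
		st->ents = calloc(src->nents, sizeof(*st->ents));
		if (st->ents == NULL) {
			free(st);
			return NULL;
		}
		/* Same pages, independent entries: the importer's dma_map_sg()
		 * writes DMA addresses into its copy, never the exporter's. */
		memcpy(st->ents, src->ents, src->nents * sizeof(*st->ents));
		st->nents = src->nents;
		return st;
	}
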
@@ -78,7 +105,9 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
 	struct drm_i915_gem_object *obj = dma_buf->priv;
 	struct drm_device *dev = obj->base.dev;
-	int ret;
+	struct scatterlist *sg;
+	struct page **pages;
+	int ret, i;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
@@ -89,24 +118,34 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 		goto out_unlock;
 	}
 
-	if (!obj->pages) {
-		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return ERR_PTR(ret);
-		}
-	}
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		goto error;
 
-	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
-	if (!obj->dma_buf_vmapping) {
-		DRM_ERROR("failed to vmap object\n");
-		goto out_unlock;
-	}
+	ret = -ENOMEM;
+
+	pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+	if (pages == NULL)
+		goto error;
+
+	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
+		pages[i] = sg_page(sg);
+
+	obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+	drm_free_large(pages);
+
+	if (!obj->dma_buf_vmapping)
+		goto error;
 
 	obj->vmapping_count = 1;
+	i915_gem_object_pin_pages(obj);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return obj->dma_buf_vmapping;
+
+error:
+	mutex_unlock(&dev->struct_mutex);
+	return ERR_PTR(ret);
 }
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
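
Because obj->pages is now an sg_table rather than an array of struct page pointers, the vmap path has to rebuild a temporary page array from the scatterlist and free it as soon as the virtual mapping exists. A standalone sketch of that rebuild (simplified types):

	#include <stdlib.h>

	struct model_sg { void *page; };

	/* The caller vmap()s the returned array, then frees it at once. */
	static void **pages_from_sg(const struct model_sg *sgl, unsigned int nents)
	{
		void **pages = calloc(nents, sizeof(*pages));
		unsigned int i;

		if (pages == NULL)
			return NULL;
		for (i = 0; i < nents; i++)	/* mirrors for_each_sg()/sg_page() */
			pages[i] = sgl[i].page;
		return pages;
	}
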
@@ -119,10 +158,11 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 	if (ret)
 		return;
 
-	--obj->vmapping_count;
-	if (obj->vmapping_count == 0) {
+	if (--obj->vmapping_count == 0) {
 		vunmap(obj->dma_buf_vmapping);
 		obj->dma_buf_vmapping = NULL;
+
+		i915_gem_object_unpin_pages(obj);
 	}
 	mutex_unlock(&dev->struct_mutex);
 }
@@ -151,6 +191,22 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
 	return -EINVAL;
 }
 
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+{
+	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->base.dev;
+	int ret;
+	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, write);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops = {
 	.map_dma_buf = i915_gem_map_dma_buf,
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,
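
Wiring up .begin_cpu_access lets an importer ask the exporter to make the pages CPU-coherent before touching them; for i915 that is a set_to_cpu_domain() call, i.e. waiting for rendering plus any needed clflush. A sketch of the importer side, assuming the dma-buf core API of this era with the same (start, length, direction) signature as the hook above:

	#include <linux/dma-buf.h>

	/* Pull the buffer into the CPU read domain before dereferencing a
	 * vmap()ed pointer; a writer would pass DMA_BIDIRECTIONAL instead. */
	static int importer_begin_read(struct dma_buf *buf, size_t len)
	{
		return dma_buf_begin_cpu_access(buf, 0, len, DMA_FROM_DEVICE);
	}
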
@@ -162,25 +218,47 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 	.mmap = i915_gem_dmabuf_mmap,
 	.vmap = i915_gem_dmabuf_vmap,
 	.vunmap = i915_gem_dmabuf_vunmap,
+	.begin_cpu_access = i915_gem_begin_cpu_access,
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				      struct drm_gem_object *gem_obj, int flags)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 
-	return dma_buf_export(obj, &i915_dmabuf_ops,
-			      obj->base.size, 0600);
+	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
+}
+
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+	struct sg_table *sg;
+
+	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sg))
+		return PTR_ERR(sg);
+
+	obj->pages = sg;
+	obj->has_dma_mapping = true;
+	return 0;
 }
 
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+	dma_buf_unmap_attachment(obj->base.import_attach,
+				 obj->pages, DMA_BIDIRECTIONAL);
+	obj->has_dma_mapping = false;
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
+	.get_pages = i915_gem_object_get_pages_dmabuf,
+	.put_pages = i915_gem_object_put_pages_dmabuf,
+};
+
 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 					     struct dma_buf *dma_buf)
 {
 	struct dma_buf_attachment *attach;
-	struct sg_table *sg;
 	struct drm_i915_gem_object *obj;
-	int npages;
-	int size;
 	int ret;
 
 	/* is this one of own objects? */
@@ -198,34 +276,24 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 	if (IS_ERR(attach))
 		return ERR_CAST(attach);
 
-	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(sg)) {
-		ret = PTR_ERR(sg);
-		goto fail_detach;
-	}
-
-	size = dma_buf->size;
-	npages = size / PAGE_SIZE;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (obj == NULL) {
 		ret = -ENOMEM;
-		goto fail_unmap;
+		goto fail_detach;
 	}
 
-	ret = drm_gem_private_object_init(dev, &obj->base, size);
+	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
 	if (ret) {
 		kfree(obj);
-		goto fail_unmap;
+		goto fail_detach;
 	}
 
-	obj->sg_table = sg;
+	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
 	obj->base.import_attach = attach;
 
 	return &obj->base;
 
-fail_unmap:
-	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
 fail_detach:
 	dma_buf_detach(dma_buf, attach);
 	return ERR_PTR(ret);
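
Import no longer maps the attachment eagerly. The object is created with a dedicated ops table, and dma_buf_map_attachment() only runs when someone actually asks for the pages, which also lets the common put_pages path drop them again under memory pressure. A standalone model of the ops indirection (types invented for illustration):

	#include <stddef.h>

	struct model_obj;

	struct model_obj_ops {
		int (*get_pages)(struct model_obj *obj);
		void (*put_pages)(struct model_obj *obj);
	};

	struct model_obj {
		const struct model_obj_ops *ops;
		void *pages;	/* NULL until get_pages() succeeds */
	};

	static int model_get_pages(struct model_obj *obj)
	{
		if (obj->pages != NULL)
			return 0;
		/* Dispatch: shmemfs backend for native objects,
		 * dma_buf_map_attachment() for imported ones. */
		return obj->ops->get_pages(obj);
	}
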
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index fd408995a783..776a3225184c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -43,7 +43,8 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
 
 int
 i915_gem_evict_something(struct drm_device *dev, int min_size,
-			 unsigned alignment, bool mappable)
+			 unsigned alignment, unsigned cache_level,
+			 bool mappable, bool nonblocking)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
@@ -78,11 +79,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 	INIT_LIST_HEAD(&unwind_list);
 	if (mappable)
 		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
-					    min_size, alignment, 0,
+					    min_size, alignment, cache_level,
 					    0, dev_priv->mm.gtt_mappable_end);
 	else
 		drm_mm_init_scan(&dev_priv->mm.gtt_space,
-				 min_size, alignment, 0);
+				 min_size, alignment, cache_level);
 
 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
@@ -90,29 +91,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 		goto found;
 	}
 
-	/* Now merge in the soon-to-be-expired objects... */
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		/* Does the object require an outstanding flush? */
-		if (obj->base.write_domain)
-			continue;
-
-		if (mark_free(obj, &unwind_list))
-			goto found;
-	}
+	if (nonblocking)
+		goto none;
 
-	/* Finally add anything with a pending flush (in order of retirement) */
-	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
-		if (mark_free(obj, &unwind_list))
-			goto found;
-	}
+	/* Now merge in the soon-to-be-expired objects... */
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		if (!obj->base.write_domain)
-			continue;
-
 		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
 
+none:
 	/* Nothing found, clean up and bail out! */
 	while (!list_empty(&unwind_list)) {
 		obj = list_first_entry(&unwind_list,
@@ -163,7 +151,7 @@ found:
 }
 
 int
-i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
+i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj, *next;
@@ -171,12 +159,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return -ENOSPC;
 
-	trace_i915_gem_evict_everything(dev, purgeable_only);
+	trace_i915_gem_evict_everything(dev);
 
 	/* The gpu_idle will flush everything in the write domain to the
 	 * active list. Then we must move everything off the active list
@@ -188,16 +175,11 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 
 	i915_gem_retire_requests(dev);
 
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
 	/* Having flushed everything, unbind() should never raise an error */
 	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list, mm_list) {
-		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
-			if (obj->pin_count == 0)
-				WARN_ON(i915_gem_object_unbind(obj));
-		}
-	}
+				 &dev_priv->mm.inactive_list, mm_list)
+		if (obj->pin_count == 0)
+			WARN_ON(i915_gem_object_unbind(obj));
 
 	return 0;
 }
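
Two behavioural changes land in this file: the drm_mm scan is seeded with the object's cache level, so colour (snooped versus LLC) constraints are honoured while hunting for a hole, and a nonblocking caller gives up after the inactive list instead of waiting on active objects. A standalone model of the ordering policy only, not of drm_mm scanning itself (types invented):

	#include <stdbool.h>

	struct model_node { bool active; bool marked; };

	static bool mark_victim(struct model_node *nodes, unsigned int n,
				bool nonblocking)
	{
		unsigned int i;

		for (i = 0; i < n; i++)		/* idle space first: free is cheap */
			if (!nodes[i].active) {
				nodes[i].marked = true;
				return true;
			}

		if (nonblocking)		/* may not stall on the GPU */
			return false;

		for (i = 0; i < n; i++)		/* then soon-to-retire active nodes */
			if (nodes[i].active) {
				nodes[i].marked = true;
				return true;
			}

		return false;
	}
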
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8dd9a6f47db8..3eea143749f6 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,180 +33,6 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
-struct change_domains {
-	uint32_t invalidate_domains;
-	uint32_t flush_domains;
-	uint32_t flush_rings;
-	uint32_t flips;
-};
-
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Mapped to GTT
- * 4. Read by GPU
- * 5. Unmapped from GTT
- * 6. Freed
- *
- * Let's take these a step at a time
- *
- * 1. Allocated
- *	Pages allocated from the kernel may still have
- *	cache contents, so we set them to (CPU, CPU) always.
- * 2. Written by CPU (using pwrite)
- *	The pwrite function calls set_domain (CPU, CPU) and
- *	this function does nothing (as nothing changes)
- * 3. Mapped by GTT
- *	This function asserts that the object is not
- *	currently in any GPU-based read or write domains
- * 4. Read by GPU
- *	i915_gem_execbuffer calls set_domain (COMMAND, 0).
- *	As write_domain is zero, this function adds in the
- *	current read domains (CPU+COMMAND, 0).
- *	flush_domains is set to CPU.
- *	invalidate_domains is set to COMMAND
- *	clflush is run to get data out of the CPU caches
- *	then i915_dev_set_domain calls i915_gem_flush to
- *	emit an MI_FLUSH and drm_agp_chipset_flush
- * 5. Unmapped from GTT
- *	i915_gem_object_unbind calls set_domain (CPU, CPU)
- *	flush_domains and invalidate_domains end up both zero
- *	so no flushing/invalidating happens
- * 6. Freed
- *	yay, done
- *
- * Case 2: The shared render buffer
- *
- * 1. Allocated
- * 2. Mapped to GTT
- * 3. Read/written by GPU
- * 4. set_domain to (CPU,CPU)
- * 5. Read/written by CPU
- * 6. Read/written by GPU
- *
- * 1. Allocated
- *	Same as last example, (CPU, CPU)
- * 2. Mapped to GTT
- *	Nothing changes (assertions find that it is not in the GPU)
- * 3. Read/written by GPU
- *	execbuffer calls set_domain (RENDER, RENDER)
- *	flush_domains gets CPU
- *	invalidate_domains gets GPU
- *	clflush (obj)
- *	MI_FLUSH and drm_agp_chipset_flush
- * 4. set_domain (CPU, CPU)
- *	flush_domains gets GPU
- *	invalidate_domains gets CPU
- *	wait_rendering (obj) to make sure all drawing is complete.
- *	This will include an MI_FLUSH to get the data from GPU
- *	to memory
- *	clflush (obj) to invalidate the CPU cache
- *	Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- * 5. Read/written by CPU
- *	cache lines are loaded and dirtied
- * 6. Read written by GPU
- *	Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Read by GPU
- * 4. Updated (written) by CPU again
- * 5. Read by GPU
- *
- * 1. Allocated
- *	(CPU, CPU)
- * 2. Written by CPU
- *	(CPU, CPU)
- * 3. Read by GPU
- *	(CPU+RENDER, 0)
- *	flush_domains = CPU
- *	invalidate_domains = RENDER
- *	clflush (obj)
- *	MI_FLUSH
- *	drm_agp_chipset_flush
- * 4. Updated (written) by CPU again
- *	(CPU, CPU)
- *	flush_domains = 0 (no previous write domain)
- *	invalidate_domains = 0 (no new read domains)
- * 5. Read by GPU
- *	(CPU+RENDER, 0)
- *	flush_domains = CPU
- *	invalidate_domains = RENDER
- *	clflush (obj)
- *	MI_FLUSH
- *	drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
-				  struct intel_ring_buffer *ring,
-				  struct change_domains *cd)
-{
-	uint32_t invalidate_domains = 0, flush_domains = 0;
-
-	/*
-	 * If the object isn't moving to a new write domain,
-	 * let the object stay in multiple read domains
-	 */
-	if (obj->base.pending_write_domain == 0)
-		obj->base.pending_read_domains |= obj->base.read_domains;
-
-	/*
-	 * Flush the current write domain if
-	 * the new read domains don't match. Invalidate
-	 * any read domains which differ from the old
-	 * write domain
-	 */
-	if (obj->base.write_domain &&
-	    (((obj->base.write_domain != obj->base.pending_read_domains ||
-	       obj->ring != ring)) ||
-	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
-		flush_domains |= obj->base.write_domain;
-		invalidate_domains |=
-			obj->base.pending_read_domains & ~obj->base.write_domain;
-	}
-	/*
-	 * Invalidate any read caches which may have
-	 * stale data. That is, any new read domains.
-	 */
-	invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
-	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
-		i915_gem_clflush_object(obj);
-
-	if (obj->base.pending_write_domain)
-		cd->flips |= atomic_read(&obj->pending_flip);
-
-	/* The actual obj->write_domain will be updated with
-	 * pending_write_domain after we emit the accumulated flush for all
-	 * of our domain changes in execbuffers (which clears objects'
-	 * write_domains). So if we have a current write domain that we
-	 * aren't changing, set pending_write_domain to that.
-	 */
-	if (flush_domains == 0 && obj->base.pending_write_domain == 0)
-		obj->base.pending_write_domain = obj->base.write_domain;
-
-	cd->invalidate_domains |= invalidate_domains;
-	cd->flush_domains |= flush_domains;
-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= intel_ring_flag(obj->ring);
-	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= intel_ring_flag(ring);
-}
-
 struct eb_objects {
 	int and;
 	struct hlist_head buckets[0];
@@ -217,6 +43,7 @@ eb_create(int size)
 {
 	struct eb_objects *eb;
 	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+	BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
 	while (count > size)
 		count >>= 1;
 	eb = kzalloc(count*sizeof(struct hlist_head) +
@@ -268,6 +95,7 @@ eb_destroy(struct eb_objects *eb)
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
 	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+		!obj->map_and_fenceable ||
 		obj->cache_level != I915_CACHE_NONE);
 }
 
@@ -382,7 +210,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		if (ret)
 			return ret;
 
-		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+		vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+							     reloc->offset >> PAGE_SHIFT));
 		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
 		kunmap_atomic(vaddr);
 	} else {
@@ -503,7 +332,8 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 	return ret;
 }
 
-#define __EXEC_OBJECT_HAS_FENCE (1<<31)
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static int
 need_reloc_mappable(struct drm_i915_gem_object *obj)
@@ -513,9 +343,10 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
 }
 
 static int
-pin_and_fence_object(struct drm_i915_gem_object *obj,
-		     struct intel_ring_buffer *ring)
+i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
+				   struct intel_ring_buffer *ring)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
@@ -527,15 +358,17 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
 		obj->tiling_mode != I915_TILING_NONE;
 	need_mappable = need_fence || need_reloc_mappable(obj);
 
-	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
 	if (ret)
 		return ret;
 
+	entry->flags |= __EXEC_OBJECT_HAS_PIN;
+
 	if (has_fenced_gpu_access) {
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			ret = i915_gem_object_get_fence(obj);
 			if (ret)
-				goto err_unpin;
+				return ret;
 
 			if (i915_gem_object_pin_fence(obj))
 				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -544,12 +377,35 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
 		}
 	}
 
+	/* Ensure ppgtt mapping exists if needed */
+	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+				       obj, obj->cache_level);
+
+		obj->has_aliasing_ppgtt_mapping = 1;
+	}
+
 	entry->offset = obj->gtt_offset;
 	return 0;
+}
 
-err_unpin:
-	i915_gem_object_unpin(obj);
-	return ret;
+static void
+i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+
+	if (!obj->gtt_space)
+		return;
+
+	entry = obj->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
 
 static int
@@ -557,11 +413,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct drm_file *file,
 			    struct list_head *objects)
 {
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	int ret, retry;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	struct list_head ordered_objects;
+	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	int retry;
 
 	INIT_LIST_HEAD(&ordered_objects);
 	while (!list_empty(objects)) {
@@ -586,6 +441,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 
 		obj->base.pending_read_domains = 0;
 		obj->base.pending_write_domain = 0;
+		obj->pending_fenced_gpu_access = false;
 	}
 	list_splice(&ordered_objects, objects);
 
@@ -598,12 +454,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 	 * 2. Bind new objects.
 	 * 3. Decrement pin count.
 	 *
-	 * This avoid unnecessary unbinding of later objects in order to makr
+	 * This avoid unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
 	 */
 	retry = 0;
 	do {
-		ret = 0;
+		int ret = 0;
 
 		/* Unbind any ill-fitting objects or pin. */
 		list_for_each_entry(obj, objects, exec_list) {
@@ -623,7 +479,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    (need_mappable && !obj->map_and_fenceable))
 				ret = i915_gem_object_unbind(obj);
 			else
-				ret = pin_and_fence_object(obj, ring);
+				ret = i915_gem_execbuffer_reserve_object(obj, ring);
 			if (ret)
 				goto err;
 		}
@@ -633,77 +489,22 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			if (obj->gtt_space)
 				continue;
 
-			ret = pin_and_fence_object(obj, ring);
-			if (ret) {
-				int ret_ignore;
-
-				/* This can potentially raise a harmless
-				 * -EINVAL if we failed to bind in the above
-				 * call. It cannot raise -EINTR since we know
-				 * that the bo is freshly bound and so will
-				 * not need to be flushed or waited upon.
-				 */
-				ret_ignore = i915_gem_object_unbind(obj);
-				(void)ret_ignore;
-				WARN_ON(obj->gtt_space);
-				break;
-			}
+			ret = i915_gem_execbuffer_reserve_object(obj, ring);
+			if (ret)
+				goto err;
 		}
 
-		/* Decrement pin count for bound objects */
-		list_for_each_entry(obj, objects, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry;
-
-			if (!obj->gtt_space)
-				continue;
-
-			entry = obj->exec_entry;
-			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				i915_gem_object_unpin_fence(obj);
-				entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
-			}
-
-			i915_gem_object_unpin(obj);
-
-			/* ... and ensure ppgtt mapping exist if needed. */
-			if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-				i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-						       obj, obj->cache_level);
-
-				obj->has_aliasing_ppgtt_mapping = 1;
-			}
-		}
+err:		/* Decrement pin count for bound objects */
+		list_for_each_entry(obj, objects, exec_list)
+			i915_gem_execbuffer_unreserve_object(obj);
 
-		if (ret != -ENOSPC || retry > 1)
+		if (ret != -ENOSPC || retry++)
 			return ret;
 
-		/* First attempt, just clear anything that is purgeable.
-		 * Second attempt, clear the entire GTT.
-		 */
-		ret = i915_gem_evict_everything(ring->dev, retry == 0);
+		ret = i915_gem_evict_everything(ring->dev);
 		if (ret)
 			return ret;
-
-		retry++;
 	} while (1);
-
-err:
-	list_for_each_entry_continue_reverse(obj, objects, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry;
-
-		if (!obj->gtt_space)
-			continue;
-
-		entry = obj->exec_entry;
-		if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-			i915_gem_object_unpin_fence(obj);
-			entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
-		}
-
-		i915_gem_object_unpin(obj);
-	}
-
-	return ret;
 }
 
 static int
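
The reserve path collapses to a single pattern: try to pin and fence everything, unreserve every object on the way out (the err label is reached on success as well, dropping the temporary pins), and on -ENOSPC evict the whole GTT exactly once before retrying. A standalone model of that retry discipline (helper behaviour invented for illustration):

	#include <errno.h>

	static int model_space = 0;	/* stand-in for free GTT space */

	static int model_pin_all(void)
	{
		return model_space-- > 0 ? 0 : -ENOSPC;
	}

	static void model_unreserve_all(void) { }

	static int model_evict_everything(void)
	{
		model_space = 1;	/* eviction frees enough space once */
		return 0;
	}

	static int model_reserve(void)
	{
		int retry = 0;

		do {
			int ret = model_pin_all();

			model_unreserve_all();	/* on success and failure alike */

			if (ret != -ENOSPC || retry++)
				return ret;	/* done, or second -ENOSPC */

			if (model_evict_everything())
				return -ENOSPC;
		} while (1);
	}
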
@@ -809,18 +610,6 @@ err:
809 return ret; 610 return ret;
810} 611}
811 612
812static void
813i915_gem_execbuffer_flush(struct drm_device *dev,
814 uint32_t invalidate_domains,
815 uint32_t flush_domains)
816{
817 if (flush_domains & I915_GEM_DOMAIN_CPU)
818 intel_gtt_chipset_flush();
819
820 if (flush_domains & I915_GEM_DOMAIN_GTT)
821 wmb();
822}
823
824static int 613static int
825i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) 614i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
826{ 615{
@@ -853,48 +642,45 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
853 return 0; 642 return 0;
854} 643}
855 644
856
857static int 645static int
858i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, 646i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
859 struct list_head *objects) 647 struct list_head *objects)
860{ 648{
861 struct drm_i915_gem_object *obj; 649 struct drm_i915_gem_object *obj;
862 struct change_domains cd; 650 uint32_t flush_domains = 0;
651 uint32_t flips = 0;
863 int ret; 652 int ret;
864 653
865 memset(&cd, 0, sizeof(cd)); 654 list_for_each_entry(obj, objects, exec_list) {
866 list_for_each_entry(obj, objects, exec_list) 655 ret = i915_gem_object_sync(obj, ring);
867 i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
868
869 if (cd.invalidate_domains | cd.flush_domains) {
870 i915_gem_execbuffer_flush(ring->dev,
871 cd.invalidate_domains,
872 cd.flush_domains);
873 }
874
875 if (cd.flips) {
876 ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
877 if (ret) 656 if (ret)
878 return ret; 657 return ret;
658
659 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
660 i915_gem_clflush_object(obj);
661
662 if (obj->base.pending_write_domain)
663 flips |= atomic_read(&obj->pending_flip);
664
665 flush_domains |= obj->base.write_domain;
879 } 666 }
880 667
881 list_for_each_entry(obj, objects, exec_list) { 668 if (flips) {
882 ret = i915_gem_object_sync(obj, ring); 669 ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
883 if (ret) 670 if (ret)
884 return ret; 671 return ret;
885 } 672 }
886 673
674 if (flush_domains & I915_GEM_DOMAIN_CPU)
675 intel_gtt_chipset_flush();
676
677 if (flush_domains & I915_GEM_DOMAIN_GTT)
678 wmb();
679
887 /* Unconditionally invalidate gpu caches and ensure that we do flush 680 /* Unconditionally invalidate gpu caches and ensure that we do flush
888 * any residual writes from the previous batch. 681 * any residual writes from the previous batch.
889 */ 682 */
890 ret = i915_gem_flush_ring(ring, 683 return intel_ring_invalidate_all_caches(ring);
891 I915_GEM_GPU_DOMAINS,
892 ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
893 if (ret)
894 return ret;
895
896 ring->gpu_caches_dirty = false;
897 return 0;
898} 684}
899 685
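The rewritten move_to_gpu path replaces the per-object change_domains bookkeeping with one pass that ORs together every object's write domain (plus pending flips) and then flushes at most once. A self-contained model of that accumulation, using illustrative domain bits rather than the driver's:

	#include <stdint.h>

	#define DOM_CPU (1u << 0)
	#define DOM_GTT (1u << 1)

	struct obj { uint32_t write_domain; struct obj *next; };

	static uint32_t gather_flush_domains(const struct obj *list)
	{
		uint32_t flush = 0;
		const struct obj *o;

		for (o = list; o; o = o->next)
			flush |= o->write_domain;	/* one pass, no per-object state */

		return flush;	/* caller: chipset flush if DOM_CPU, wmb() if DOM_GTT */
	}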
900static bool 686static bool
@@ -942,9 +728,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
942 struct drm_i915_gem_object *obj; 728 struct drm_i915_gem_object *obj;
943 729
944 list_for_each_entry(obj, objects, exec_list) { 730 list_for_each_entry(obj, objects, exec_list) {
945 u32 old_read = obj->base.read_domains; 731 u32 old_read = obj->base.read_domains;
946 u32 old_write = obj->base.write_domain; 732 u32 old_write = obj->base.write_domain;
947
948 733
949 obj->base.read_domains = obj->base.pending_read_domains; 734 obj->base.read_domains = obj->base.pending_read_domains;
950 obj->base.write_domain = obj->base.pending_write_domain; 735 obj->base.write_domain = obj->base.pending_write_domain;
@@ -953,17 +738,13 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
953 i915_gem_object_move_to_active(obj, ring, seqno); 738 i915_gem_object_move_to_active(obj, ring, seqno);
954 if (obj->base.write_domain) { 739 if (obj->base.write_domain) {
955 obj->dirty = 1; 740 obj->dirty = 1;
956 obj->pending_gpu_write = true; 741 obj->last_write_seqno = seqno;
957 list_move_tail(&obj->gpu_write_list,
958 &ring->gpu_write_list);
959 if (obj->pin_count) /* check for potential scanout */ 742 if (obj->pin_count) /* check for potential scanout */
960 intel_mark_busy(ring->dev, obj); 743 intel_mark_fb_busy(obj);
961 } 744 }
962 745
963 trace_i915_gem_object_change_domain(obj, old_read, old_write); 746 trace_i915_gem_object_change_domain(obj, old_read, old_write);
964 } 747 }
965
966 intel_mark_busy(ring->dev, NULL);
967} 748}
968 749
969static void 750static void
@@ -971,16 +752,11 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
971 struct drm_file *file, 752 struct drm_file *file,
972 struct intel_ring_buffer *ring) 753 struct intel_ring_buffer *ring)
973{ 754{
974 struct drm_i915_gem_request *request;
975
976 /* Unconditionally force add_request to emit a full flush. */ 755 /* Unconditionally force add_request to emit a full flush. */
977 ring->gpu_caches_dirty = true; 756 ring->gpu_caches_dirty = true;
978 757
979 /* Add a breadcrumb for the completion of the batch buffer */ 758 /* Add a breadcrumb for the completion of the batch buffer */
980 request = kzalloc(sizeof(*request), GFP_KERNEL); 759 (void)i915_add_request(ring, file, NULL);
981 if (request == NULL || i915_add_request(ring, file, request)) {
982 kfree(request);
983 }
984} 760}
985 761
986static int 762static int
@@ -1326,8 +1102,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1326 return -ENOMEM; 1102 return -ENOMEM;
1327 } 1103 }
1328 ret = copy_from_user(exec_list, 1104 ret = copy_from_user(exec_list,
1329 (struct drm_i915_relocation_entry __user *) 1105 (void __user *)(uintptr_t)args->buffers_ptr,
1330 (uintptr_t) args->buffers_ptr,
1331 sizeof(*exec_list) * args->buffer_count); 1106 sizeof(*exec_list) * args->buffer_count);
1332 if (ret != 0) { 1107 if (ret != 0) {
1333 DRM_DEBUG("copy %d exec entries failed %d\n", 1108 DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1366,8 +1141,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
1366 for (i = 0; i < args->buffer_count; i++) 1141 for (i = 0; i < args->buffer_count; i++)
1367 exec_list[i].offset = exec2_list[i].offset; 1142 exec_list[i].offset = exec2_list[i].offset;
1368 /* ... and back out to userspace */ 1143 /* ... and back out to userspace */
1369 ret = copy_to_user((struct drm_i915_relocation_entry __user *) 1144 ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
1370 (uintptr_t) args->buffers_ptr,
1371 exec_list, 1145 exec_list,
1372 sizeof(*exec_list) * args->buffer_count); 1146 sizeof(*exec_list) * args->buffer_count);
1373 if (ret) { 1147 if (ret) {
@@ -1421,8 +1195,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1421 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); 1195 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1422 if (!ret) { 1196 if (!ret) {
1423 /* Copy the new buffer offsets back to the user's exec list. */ 1197 /* Copy the new buffer offsets back to the user's exec list. */
1424 ret = copy_to_user((struct drm_i915_relocation_entry __user *) 1198 ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
1425 (uintptr_t) args->buffers_ptr,
1426 exec2_list, 1199 exec2_list,
1427 sizeof(*exec2_list) * args->buffer_count); 1200 sizeof(*exec2_list) * args->buffer_count);
1428 if (ret) { 1201 if (ret) {
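The repeated cast cleanup in the three hunks above uses the standard two-step conversion from the ABI's u64 field to a user pointer: narrow to uintptr_t first so 32-bit builds don't warn about a size-mismatched cast. In isolation (the function name is illustrative):

	#include <stdint.h>

	static void *u64_to_user_ptr_sketch(uint64_t value)
	{
		return (void *)(uintptr_t)value;	/* integer first, then pointer */
	}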
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 69261acb94b3..df470b5e8d36 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -166,8 +166,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
166} 166}
167 167
168static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, 168static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
169 struct scatterlist *sg_list, 169 const struct sg_table *pages,
170 unsigned sg_len,
171 unsigned first_entry, 170 unsigned first_entry,
172 uint32_t pte_flags) 171 uint32_t pte_flags)
173{ 172{
@@ -179,12 +178,12 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
179 struct scatterlist *sg; 178 struct scatterlist *sg;
180 179
181 /* init sg walking */ 180 /* init sg walking */
182 sg = sg_list; 181 sg = pages->sgl;
183 i = 0; 182 i = 0;
184 segment_len = sg_dma_len(sg) >> PAGE_SHIFT; 183 segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
185 m = 0; 184 m = 0;
186 185
187 while (i < sg_len) { 186 while (i < pages->nents) {
188 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); 187 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
189 188
190 for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { 189 for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
@@ -193,13 +192,11 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
193 pt_vaddr[j] = pte | pte_flags; 192 pt_vaddr[j] = pte | pte_flags;
194 193
195 /* grab the next page */ 194 /* grab the next page */
196 m++; 195 if (++m == segment_len) {
197 if (m == segment_len) { 196 if (++i == pages->nents)
198 sg = sg_next(sg);
199 i++;
200 if (i == sg_len)
201 break; 197 break;
202 198
199 sg = sg_next(sg);
203 segment_len = sg_dma_len(sg) >> PAGE_SHIFT; 200 segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
204 m = 0; 201 m = 0;
205 } 202 }
@@ -212,44 +209,10 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
212 } 209 }
213} 210}
214 211
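The PTE writer above now consumes the object's sg_table directly: `m` counts pages inside the current segment and `i` counts segments, so `if (++m == segment_len)` is the hand-off point to sg_next(). The same two-counter walk, modelled stand-alone with an illustrative segment type (assumes nents >= 1 and every segment non-empty):

	#include <stdio.h>

	struct seg { unsigned int pages; };	/* stand-in for one scatterlist entry */

	static void walk_segments(const struct seg *segs, unsigned int nents)
	{
		unsigned int i = 0, m = 0;
		unsigned int segment_len = segs[0].pages;

		while (i < nents) {
			printf("pte for segment %u, page %u\n", i, m);
			if (++m == segment_len) {	/* current segment exhausted */
				if (++i == nents)
					break;
				segment_len = segs[i].pages;	/* i.e. sg_next() */
				m = 0;
			}
		}
	}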
215static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
216 unsigned first_entry, unsigned num_entries,
217 struct page **pages, uint32_t pte_flags)
218{
219 uint32_t *pt_vaddr, pte;
220 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
221 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
222 unsigned last_pte, i;
223 dma_addr_t page_addr;
224
225 while (num_entries) {
226 last_pte = first_pte + num_entries;
227 last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
228
229 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
230
231 for (i = first_pte; i < last_pte; i++) {
232 page_addr = page_to_phys(*pages);
233 pte = GEN6_PTE_ADDR_ENCODE(page_addr);
234 pt_vaddr[i] = pte | pte_flags;
235
236 pages++;
237 }
238
239 kunmap_atomic(pt_vaddr);
240
241 num_entries -= last_pte - first_pte;
242 first_pte = 0;
243 act_pd++;
244 }
245}
246
247void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, 212void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
248 struct drm_i915_gem_object *obj, 213 struct drm_i915_gem_object *obj,
249 enum i915_cache_level cache_level) 214 enum i915_cache_level cache_level)
250{ 215{
251 struct drm_device *dev = obj->base.dev;
252 struct drm_i915_private *dev_priv = dev->dev_private;
253 uint32_t pte_flags = GEN6_PTE_VALID; 216 uint32_t pte_flags = GEN6_PTE_VALID;
254 217
255 switch (cache_level) { 218 switch (cache_level) {
@@ -260,7 +223,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
260 pte_flags |= GEN6_PTE_CACHE_LLC; 223 pte_flags |= GEN6_PTE_CACHE_LLC;
261 break; 224 break;
262 case I915_CACHE_NONE: 225 case I915_CACHE_NONE:
263 if (IS_HASWELL(dev)) 226 if (IS_HASWELL(obj->base.dev))
264 pte_flags |= HSW_PTE_UNCACHED; 227 pte_flags |= HSW_PTE_UNCACHED;
265 else 228 else
266 pte_flags |= GEN6_PTE_UNCACHED; 229 pte_flags |= GEN6_PTE_UNCACHED;
@@ -269,26 +232,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
269 BUG(); 232 BUG();
270 } 233 }
271 234
272 if (obj->sg_table) { 235 i915_ppgtt_insert_sg_entries(ppgtt,
273 i915_ppgtt_insert_sg_entries(ppgtt, 236 obj->pages,
274 obj->sg_table->sgl, 237 obj->gtt_space->start >> PAGE_SHIFT,
275 obj->sg_table->nents, 238 pte_flags);
276 obj->gtt_space->start >> PAGE_SHIFT,
277 pte_flags);
278 } else if (dev_priv->mm.gtt->needs_dmar) {
279 BUG_ON(!obj->sg_list);
280
281 i915_ppgtt_insert_sg_entries(ppgtt,
282 obj->sg_list,
283 obj->num_sg,
284 obj->gtt_space->start >> PAGE_SHIFT,
285 pte_flags);
286 } else
287 i915_ppgtt_insert_pages(ppgtt,
288 obj->gtt_space->start >> PAGE_SHIFT,
289 obj->base.size >> PAGE_SHIFT,
290 obj->pages,
291 pte_flags);
292} 239}
293 240
294void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 241void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -350,7 +297,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
350 intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, 297 intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
351 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); 298 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
352 299
353 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 300 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
354 i915_gem_clflush_object(obj); 301 i915_gem_clflush_object(obj);
355 i915_gem_gtt_bind_object(obj, obj->cache_level); 302 i915_gem_gtt_bind_object(obj, obj->cache_level);
356 } 303 }
@@ -360,44 +307,26 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
360 307
361int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) 308int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
362{ 309{
363 struct drm_device *dev = obj->base.dev; 310 if (obj->has_dma_mapping)
364 struct drm_i915_private *dev_priv = dev->dev_private;
365
366 /* don't map imported dma buf objects */
367 if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
368 return intel_gtt_map_memory(obj->pages,
369 obj->base.size >> PAGE_SHIFT,
370 &obj->sg_list,
371 &obj->num_sg);
372 else
373 return 0; 311 return 0;
312
313 if (!dma_map_sg(&obj->base.dev->pdev->dev,
314 obj->pages->sgl, obj->pages->nents,
315 PCI_DMA_BIDIRECTIONAL))
316 return -ENOSPC;
317
318 return 0;
374} 319}
375 320
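Prepare and finish now pair dma_map_sg()/dma_unmap_sg() over the whole table, skipping objects that arrive pre-mapped (imported dma-bufs, flagged has_dma_mapping); a return of 0 mapped entries is reported as -ENOSPC. A kernel-style sketch of that pairing, with illustrative function names and DMA_BIDIRECTIONAL as the generic spelling of the PCI_DMA_BIDIRECTIONAL the patch uses:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	static int sketch_prepare(struct device *dev, struct sg_table *st,
				  bool premapped)
	{
		if (premapped)			/* e.g. an imported dma-buf */
			return 0;

		if (!dma_map_sg(dev, st->sgl, st->nents, DMA_BIDIRECTIONAL))
			return -ENOSPC;		/* nothing mapped: IOMMU space gone */

		return 0;
	}

	static void sketch_finish(struct device *dev, struct sg_table *st,
				  bool premapped)
	{
		if (!premapped)
			dma_unmap_sg(dev, st->sgl, st->nents, DMA_BIDIRECTIONAL);
	}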
376void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 321void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
377 enum i915_cache_level cache_level) 322 enum i915_cache_level cache_level)
378{ 323{
379 struct drm_device *dev = obj->base.dev; 324 struct drm_device *dev = obj->base.dev;
380 struct drm_i915_private *dev_priv = dev->dev_private;
381 unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); 325 unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
382 326
383 if (obj->sg_table) { 327 intel_gtt_insert_sg_entries(obj->pages,
384 intel_gtt_insert_sg_entries(obj->sg_table->sgl, 328 obj->gtt_space->start >> PAGE_SHIFT,
385 obj->sg_table->nents, 329 agp_type);
386 obj->gtt_space->start >> PAGE_SHIFT,
387 agp_type);
388 } else if (dev_priv->mm.gtt->needs_dmar) {
389 BUG_ON(!obj->sg_list);
390
391 intel_gtt_insert_sg_entries(obj->sg_list,
392 obj->num_sg,
393 obj->gtt_space->start >> PAGE_SHIFT,
394 agp_type);
395 } else
396 intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
397 obj->base.size >> PAGE_SHIFT,
398 obj->pages,
399 agp_type);
400
401 obj->has_global_gtt_mapping = 1; 330 obj->has_global_gtt_mapping = 1;
402} 331}
403 332
@@ -417,14 +346,31 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
417 346
418 interruptible = do_idling(dev_priv); 347 interruptible = do_idling(dev_priv);
419 348
420 if (obj->sg_list) { 349 if (!obj->has_dma_mapping)
421 intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); 350 dma_unmap_sg(&dev->pdev->dev,
422 obj->sg_list = NULL; 351 obj->pages->sgl, obj->pages->nents,
423 } 352 PCI_DMA_BIDIRECTIONAL);
424 353
425 undo_idling(dev_priv, interruptible); 354 undo_idling(dev_priv, interruptible);
426} 355}
427 356
357static void i915_gtt_color_adjust(struct drm_mm_node *node,
358 unsigned long color,
359 unsigned long *start,
360 unsigned long *end)
361{
362 if (node->color != color)
363 *start += 4096;
364
365 if (!list_empty(&node->node_list)) {
366 node = list_entry(node->node_list.next,
367 struct drm_mm_node,
368 node_list);
369 if (node->allocated && node->color != color)
370 *end -= 4096;
371 }
372}
373
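i915_gtt_color_adjust() implements drm_mm's coloring hook: on !HAS_LLC hardware a candidate hole is shrunk by one 4 KiB guard page on any side whose neighbour carries a different cache "color". A stand-alone model of the rule (types illustrative):

	#include <stdbool.h>

	struct node { unsigned long color; bool allocated; };

	static void color_adjust(const struct node *prev, const struct node *next,
				 unsigned long color,
				 unsigned long *start, unsigned long *end)
	{
		if (prev && prev->color != color)
			*start += 4096;		/* guard page below the hole */

		if (next && next->allocated && next->color != color)
			*end -= 4096;		/* guard page above the hole */
	}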
428void i915_gem_init_global_gtt(struct drm_device *dev, 374void i915_gem_init_global_gtt(struct drm_device *dev,
429 unsigned long start, 375 unsigned long start,
430 unsigned long mappable_end, 376 unsigned long mappable_end,
@@ -434,6 +380,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
434 380
435 /* Subtract the guard page ... */ 381
436 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); 382 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
383 if (!HAS_LLC(dev))
384 dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
437 385
438 dev_priv->mm.gtt_start = start; 386 dev_priv->mm.gtt_start = start;
439 dev_priv->mm.gtt_mappable_end = mappable_end; 387 dev_priv->mm.gtt_mappable_end = mappable_end;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c2b7b67e410d..3208650a235c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -469,18 +469,20 @@ i915_gem_swizzle_page(struct page *page)
469void 469void
470i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) 470i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
471{ 471{
472 struct scatterlist *sg;
472 int page_count = obj->base.size >> PAGE_SHIFT; 473 int page_count = obj->base.size >> PAGE_SHIFT;
473 int i; 474 int i;
474 475
475 if (obj->bit_17 == NULL) 476 if (obj->bit_17 == NULL)
476 return; 477 return;
477 478
478 for (i = 0; i < page_count; i++) { 479 for_each_sg(obj->pages->sgl, sg, page_count, i) {
479 char new_bit_17 = page_to_phys(obj->pages[i]) >> 17; 480 struct page *page = sg_page(sg);
481 char new_bit_17 = page_to_phys(page) >> 17;
480 if ((new_bit_17 & 0x1) != 482 if ((new_bit_17 & 0x1) !=
481 (test_bit(i, obj->bit_17) != 0)) { 483 (test_bit(i, obj->bit_17) != 0)) {
482 i915_gem_swizzle_page(obj->pages[i]); 484 i915_gem_swizzle_page(page);
483 set_page_dirty(obj->pages[i]); 485 set_page_dirty(page);
484 } 486 }
485 } 487 }
486} 488}
@@ -488,6 +490,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
488void 490void
489i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) 491i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
490{ 492{
493 struct scatterlist *sg;
491 int page_count = obj->base.size >> PAGE_SHIFT; 494 int page_count = obj->base.size >> PAGE_SHIFT;
492 int i; 495 int i;
493 496
@@ -501,8 +504,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
501 } 504 }
502 } 505 }
503 506
504 for (i = 0; i < page_count; i++) { 507 for_each_sg(obj->pages->sgl, sg, page_count, i) {
505 if (page_to_phys(obj->pages[i]) & (1 << 17)) 508 struct page *page = sg_page(sg);
509 if (page_to_phys(page) & (1 << 17))
506 __set_bit(i, obj->bit_17); 510 __set_bit(i, obj->bit_17);
507 else 511 else
508 __clear_bit(i, obj->bit_17); 512 __clear_bit(i, obj->bit_17);
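Both swizzle helpers now find their pages through the sg_table, as above: for_each_sg() steps one scatterlist entry per page (the loop assumes one page per entry, exactly as the old array walk did) and sg_page() recovers the struct page. A minimal kernel-style sketch of the idiom:

	#include <linux/scatterlist.h>
	#include <linux/mm.h>

	static void dirty_all_pages(struct sg_table *st, int page_count)
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(st->sgl, sg, page_count, i) {
			struct page *page = sg_page(sg);	/* one page per entry */

			set_page_dirty(page);
		}
	}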
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 505357886bbb..4e9888388c0c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -295,11 +295,21 @@ static void i915_hotplug_work_func(struct work_struct *work)
295 drm_helper_hpd_irq_event(dev); 295 drm_helper_hpd_irq_event(dev);
296} 296}
297 297
298static void i915_handle_rps_change(struct drm_device *dev) 298/* defined in intel_pm.c */
299extern spinlock_t mchdev_lock;
300
301static void ironlake_handle_rps_change(struct drm_device *dev)
299{ 302{
300 drm_i915_private_t *dev_priv = dev->dev_private; 303 drm_i915_private_t *dev_priv = dev->dev_private;
301 u32 busy_up, busy_down, max_avg, min_avg; 304 u32 busy_up, busy_down, max_avg, min_avg;
302 u8 new_delay = dev_priv->cur_delay; 305 u8 new_delay;
306 unsigned long flags;
307
308 spin_lock_irqsave(&mchdev_lock, flags);
309
310 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
311
312 new_delay = dev_priv->ips.cur_delay;
303 313
304 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 314 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
305 busy_up = I915_READ(RCPREVBSYTUPAVG); 315 busy_up = I915_READ(RCPREVBSYTUPAVG);
@@ -309,19 +319,21 @@ static void i915_handle_rps_change(struct drm_device *dev)
309 319
310 /* Handle RCS change request from hw */ 320 /* Handle RCS change request from hw */
311 if (busy_up > max_avg) { 321 if (busy_up > max_avg) {
312 if (dev_priv->cur_delay != dev_priv->max_delay) 322 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
313 new_delay = dev_priv->cur_delay - 1; 323 new_delay = dev_priv->ips.cur_delay - 1;
314 if (new_delay < dev_priv->max_delay) 324 if (new_delay < dev_priv->ips.max_delay)
315 new_delay = dev_priv->max_delay; 325 new_delay = dev_priv->ips.max_delay;
316 } else if (busy_down < min_avg) { 326 } else if (busy_down < min_avg) {
317 if (dev_priv->cur_delay != dev_priv->min_delay) 327 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
318 new_delay = dev_priv->cur_delay + 1; 328 new_delay = dev_priv->ips.cur_delay + 1;
319 if (new_delay > dev_priv->min_delay) 329 if (new_delay > dev_priv->ips.min_delay)
320 new_delay = dev_priv->min_delay; 330 new_delay = dev_priv->ips.min_delay;
321 } 331 }
322 332
323 if (ironlake_set_drps(dev, new_delay)) 333 if (ironlake_set_drps(dev, new_delay))
324 dev_priv->cur_delay = new_delay; 334 dev_priv->ips.cur_delay = new_delay;
335
336 spin_unlock_irqrestore(&mchdev_lock, flags);
325 337
326 return; 338 return;
327} 339}
@@ -334,7 +346,7 @@ static void notify_ring(struct drm_device *dev,
334 if (ring->obj == NULL) 346 if (ring->obj == NULL)
335 return; 347 return;
336 348
337 trace_i915_gem_request_complete(ring, ring->get_seqno(ring)); 349 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
338 350
339 wake_up_all(&ring->irq_queue); 351 wake_up_all(&ring->irq_queue);
340 if (i915_enable_hangcheck) { 352 if (i915_enable_hangcheck) {
@@ -348,16 +360,16 @@ static void notify_ring(struct drm_device *dev,
348static void gen6_pm_rps_work(struct work_struct *work) 360static void gen6_pm_rps_work(struct work_struct *work)
349{ 361{
350 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 362 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
351 rps_work); 363 rps.work);
352 u32 pm_iir, pm_imr; 364 u32 pm_iir, pm_imr;
353 u8 new_delay; 365 u8 new_delay;
354 366
355 spin_lock_irq(&dev_priv->rps_lock); 367 spin_lock_irq(&dev_priv->rps.lock);
356 pm_iir = dev_priv->pm_iir; 368 pm_iir = dev_priv->rps.pm_iir;
357 dev_priv->pm_iir = 0; 369 dev_priv->rps.pm_iir = 0;
358 pm_imr = I915_READ(GEN6_PMIMR); 370 pm_imr = I915_READ(GEN6_PMIMR);
359 I915_WRITE(GEN6_PMIMR, 0); 371 I915_WRITE(GEN6_PMIMR, 0);
360 spin_unlock_irq(&dev_priv->rps_lock); 372 spin_unlock_irq(&dev_priv->rps.lock);
361 373
362 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) 374 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
363 return; 375 return;
@@ -365,11 +377,17 @@ static void gen6_pm_rps_work(struct work_struct *work)
365 mutex_lock(&dev_priv->dev->struct_mutex); 377 mutex_lock(&dev_priv->dev->struct_mutex);
366 378
367 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) 379 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
368 new_delay = dev_priv->cur_delay + 1; 380 new_delay = dev_priv->rps.cur_delay + 1;
369 else 381 else
370 new_delay = dev_priv->cur_delay - 1; 382 new_delay = dev_priv->rps.cur_delay - 1;
371 383
372 gen6_set_rps(dev_priv->dev, new_delay); 384 /* sysfs frequency interfaces may have snuck in while servicing the
385 * interrupt
386 */
387 if (!(new_delay > dev_priv->rps.max_delay ||
388 new_delay < dev_priv->rps.min_delay)) {
389 gen6_set_rps(dev_priv->dev, new_delay);
390 }
373 391
374 mutex_unlock(&dev_priv->dev->struct_mutex); 392 mutex_unlock(&dev_priv->dev->struct_mutex);
375} 393}
@@ -443,7 +461,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
443 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 461 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
444 unsigned long flags; 462 unsigned long flags;
445 463
446 if (!IS_IVYBRIDGE(dev)) 464 if (!HAS_L3_GPU_CACHE(dev))
447 return; 465 return;
448 466
449 spin_lock_irqsave(&dev_priv->irq_lock, flags); 467 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -487,19 +505,19 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
487 * IIR bits should never already be set because IMR should 505 * IIR bits should never already be set because IMR should
488 * prevent an interrupt from being shown in IIR. The warning 506 * prevent an interrupt from being shown in IIR. The warning
489 * displays a case where we've unsafely cleared 507 * displays a case where we've unsafely cleared
490 * dev_priv->pm_iir. Although missing an interrupt of the same 508 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
491 * type is not a problem, it indicates a bug in the logic. 509
492 * 510 *
493 * The mask bit in IMR is cleared by rps_work. 511 * The mask bit in IMR is cleared by dev_priv->rps.work.
494 */ 512 */
495 513
496 spin_lock_irqsave(&dev_priv->rps_lock, flags); 514 spin_lock_irqsave(&dev_priv->rps.lock, flags);
497 dev_priv->pm_iir |= pm_iir; 515 dev_priv->rps.pm_iir |= pm_iir;
498 I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); 516 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
499 POSTING_READ(GEN6_PMIMR); 517 POSTING_READ(GEN6_PMIMR);
500 spin_unlock_irqrestore(&dev_priv->rps_lock, flags); 518 spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
501 519
502 queue_work(dev_priv->wq, &dev_priv->rps_work); 520 queue_work(dev_priv->wq, &dev_priv->rps.work);
503} 521}
504 522
505static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) 523static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
@@ -792,10 +810,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
792 ibx_irq_handler(dev, pch_iir); 810 ibx_irq_handler(dev, pch_iir);
793 } 811 }
794 812
795 if (de_iir & DE_PCU_EVENT) { 813 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
796 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 814 ironlake_handle_rps_change(dev);
797 i915_handle_rps_change(dev);
798 }
799 815
800 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) 816 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
801 gen6_queue_rps_work(dev_priv, pm_iir); 817 gen6_queue_rps_work(dev_priv, pm_iir);
@@ -842,26 +858,55 @@ static void i915_error_work_func(struct work_struct *work)
842 } 858 }
843} 859}
844 860
861/* NB: instdone is zeroed up front; each gen fills in only its own registers */
862static void i915_get_extra_instdone(struct drm_device *dev,
863 uint32_t *instdone)
864{
865 struct drm_i915_private *dev_priv = dev->dev_private;
866 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
867
868 switch(INTEL_INFO(dev)->gen) {
869 case 2:
870 case 3:
871 instdone[0] = I915_READ(INSTDONE);
872 break;
873 case 4:
874 case 5:
875 case 6:
876 instdone[0] = I915_READ(INSTDONE_I965);
877 instdone[1] = I915_READ(INSTDONE1);
878 break;
879 default:
880 WARN_ONCE(1, "Unsupported platform\n");
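	/* fall through: read the gen7 registers for unknown newer gens too */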
881 case 7:
882 instdone[0] = I915_READ(GEN7_INSTDONE_1);
883 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
884 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
885 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
886 break;
887 }
888}
889
845#ifdef CONFIG_DEBUG_FS 890#ifdef CONFIG_DEBUG_FS
846static struct drm_i915_error_object * 891static struct drm_i915_error_object *
847i915_error_object_create(struct drm_i915_private *dev_priv, 892i915_error_object_create(struct drm_i915_private *dev_priv,
848 struct drm_i915_gem_object *src) 893 struct drm_i915_gem_object *src)
849{ 894{
850 struct drm_i915_error_object *dst; 895 struct drm_i915_error_object *dst;
851 int page, page_count; 896 int i, count;
852 u32 reloc_offset; 897 u32 reloc_offset;
853 898
854 if (src == NULL || src->pages == NULL) 899 if (src == NULL || src->pages == NULL)
855 return NULL; 900 return NULL;
856 901
857 page_count = src->base.size / PAGE_SIZE; 902 count = src->base.size / PAGE_SIZE;
858 903
859 dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC); 904 dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
860 if (dst == NULL) 905 if (dst == NULL)
861 return NULL; 906 return NULL;
862 907
863 reloc_offset = src->gtt_offset; 908 reloc_offset = src->gtt_offset;
864 for (page = 0; page < page_count; page++) { 909 for (i = 0; i < count; i++) {
865 unsigned long flags; 910 unsigned long flags;
866 void *d; 911 void *d;
867 912
@@ -884,30 +929,33 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
884 memcpy_fromio(d, s, PAGE_SIZE); 929 memcpy_fromio(d, s, PAGE_SIZE);
885 io_mapping_unmap_atomic(s); 930 io_mapping_unmap_atomic(s);
886 } else { 931 } else {
932 struct page *page;
887 void *s; 933 void *s;
888 934
889 drm_clflush_pages(&src->pages[page], 1); 935 page = i915_gem_object_get_page(src, i);
936
937 drm_clflush_pages(&page, 1);
890 938
891 s = kmap_atomic(src->pages[page]); 939 s = kmap_atomic(page);
892 memcpy(d, s, PAGE_SIZE); 940 memcpy(d, s, PAGE_SIZE);
893 kunmap_atomic(s); 941 kunmap_atomic(s);
894 942
895 drm_clflush_pages(&src->pages[page], 1); 943 drm_clflush_pages(&page, 1);
896 } 944 }
897 local_irq_restore(flags); 945 local_irq_restore(flags);
898 946
899 dst->pages[page] = d; 947 dst->pages[i] = d;
900 948
901 reloc_offset += PAGE_SIZE; 949 reloc_offset += PAGE_SIZE;
902 } 950 }
903 dst->page_count = page_count; 951 dst->page_count = count;
904 dst->gtt_offset = src->gtt_offset; 952 dst->gtt_offset = src->gtt_offset;
905 953
906 return dst; 954 return dst;
907 955
908unwind: 956unwind:
909 while (page--) 957 while (i--)
910 kfree(dst->pages[page]); 958 kfree(dst->pages[i]);
911 kfree(dst); 959 kfree(dst);
912 return NULL; 960 return NULL;
913} 961}
@@ -948,7 +996,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
948{ 996{
949 err->size = obj->base.size; 997 err->size = obj->base.size;
950 err->name = obj->base.name; 998 err->name = obj->base.name;
951 err->seqno = obj->last_rendering_seqno; 999 err->rseqno = obj->last_read_seqno;
1000 err->wseqno = obj->last_write_seqno;
952 err->gtt_offset = obj->gtt_offset; 1001 err->gtt_offset = obj->gtt_offset;
953 err->read_domains = obj->base.read_domains; 1002 err->read_domains = obj->base.read_domains;
954 err->write_domain = obj->base.write_domain; 1003 err->write_domain = obj->base.write_domain;
@@ -1038,12 +1087,12 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1038 if (!ring->get_seqno) 1087 if (!ring->get_seqno)
1039 return NULL; 1088 return NULL;
1040 1089
1041 seqno = ring->get_seqno(ring); 1090 seqno = ring->get_seqno(ring, false);
1042 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 1091 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1043 if (obj->ring != ring) 1092 if (obj->ring != ring)
1044 continue; 1093 continue;
1045 1094
1046 if (i915_seqno_passed(seqno, obj->last_rendering_seqno)) 1095 if (i915_seqno_passed(seqno, obj->last_read_seqno))
1047 continue; 1096 continue;
1048 1097
1049 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 1098 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
@@ -1079,10 +1128,8 @@ static void i915_record_ring_state(struct drm_device *dev,
1079 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 1128 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1080 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 1129 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1081 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 1130 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1082 if (ring->id == RCS) { 1131 if (ring->id == RCS)
1083 error->instdone1 = I915_READ(INSTDONE1);
1084 error->bbaddr = I915_READ64(BB_ADDR); 1132 error->bbaddr = I915_READ64(BB_ADDR);
1085 }
1086 } else { 1133 } else {
1087 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 1134 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1088 error->ipeir[ring->id] = I915_READ(IPEIR); 1135 error->ipeir[ring->id] = I915_READ(IPEIR);
@@ -1092,7 +1139,7 @@ static void i915_record_ring_state(struct drm_device *dev,
1092 1139
1093 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); 1140 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1094 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 1141 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1095 error->seqno[ring->id] = ring->get_seqno(ring); 1142 error->seqno[ring->id] = ring->get_seqno(ring, false);
1096 error->acthd[ring->id] = intel_ring_get_active_head(ring); 1143 error->acthd[ring->id] = intel_ring_get_active_head(ring);
1097 error->head[ring->id] = I915_READ_HEAD(ring); 1144 error->head[ring->id] = I915_READ_HEAD(ring);
1098 error->tail[ring->id] = I915_READ_TAIL(ring); 1145 error->tail[ring->id] = I915_READ_TAIL(ring);
@@ -1198,6 +1245,11 @@ static void i915_capture_error_state(struct drm_device *dev)
1198 error->done_reg = I915_READ(DONE_REG); 1245 error->done_reg = I915_READ(DONE_REG);
1199 } 1246 }
1200 1247
1248 if (INTEL_INFO(dev)->gen == 7)
1249 error->err_int = I915_READ(GEN7_ERR_INT);
1250
1251 i915_get_extra_instdone(dev, error->extra_instdone);
1252
1201 i915_gem_record_fences(dev, error); 1253 i915_gem_record_fences(dev, error);
1202 i915_gem_record_rings(dev, error); 1254 i915_gem_record_rings(dev, error);
1203 1255
@@ -1209,7 +1261,7 @@ static void i915_capture_error_state(struct drm_device *dev)
1209 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 1261 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1210 i++; 1262 i++;
1211 error->active_bo_count = i; 1263 error->active_bo_count = i;
1212 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) 1264 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
1213 if (obj->pin_count) 1265 if (obj->pin_count)
1214 i++; 1266 i++;
1215 error->pinned_bo_count = i - error->active_bo_count; 1267 error->pinned_bo_count = i - error->active_bo_count;
@@ -1234,7 +1286,7 @@ static void i915_capture_error_state(struct drm_device *dev)
1234 error->pinned_bo_count = 1286 error->pinned_bo_count =
1235 capture_pinned_bo(error->pinned_bo, 1287 capture_pinned_bo(error->pinned_bo,
1236 error->pinned_bo_count, 1288 error->pinned_bo_count,
1237 &dev_priv->mm.gtt_list); 1289 &dev_priv->mm.bound_list);
1238 1290
1239 do_gettimeofday(&error->time); 1291 do_gettimeofday(&error->time);
1240 1292
@@ -1273,24 +1325,26 @@ void i915_destroy_error_state(struct drm_device *dev)
1273static void i915_report_and_clear_eir(struct drm_device *dev) 1325static void i915_report_and_clear_eir(struct drm_device *dev)
1274{ 1326{
1275 struct drm_i915_private *dev_priv = dev->dev_private; 1327 struct drm_i915_private *dev_priv = dev->dev_private;
1328 uint32_t instdone[I915_NUM_INSTDONE_REG];
1276 u32 eir = I915_READ(EIR); 1329 u32 eir = I915_READ(EIR);
1277 int pipe; 1330 int pipe, i;
1278 1331
1279 if (!eir) 1332 if (!eir)
1280 return; 1333 return;
1281 1334
1282 pr_err("render error detected, EIR: 0x%08x\n", eir); 1335 pr_err("render error detected, EIR: 0x%08x\n", eir);
1283 1336
1337 i915_get_extra_instdone(dev, instdone);
1338
1284 if (IS_G4X(dev)) { 1339 if (IS_G4X(dev)) {
1285 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 1340 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1286 u32 ipeir = I915_READ(IPEIR_I965); 1341 u32 ipeir = I915_READ(IPEIR_I965);
1287 1342
1288 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1343 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1289 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1344 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1290 pr_err(" INSTDONE: 0x%08x\n", 1345 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1291 I915_READ(INSTDONE_I965)); 1346 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1292 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1347 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1293 pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
1294 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1348 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1295 I915_WRITE(IPEIR_I965, ipeir); 1349 I915_WRITE(IPEIR_I965, ipeir);
1296 POSTING_READ(IPEIR_I965); 1350 POSTING_READ(IPEIR_I965);
@@ -1324,12 +1378,13 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
1324 if (eir & I915_ERROR_INSTRUCTION) { 1378 if (eir & I915_ERROR_INSTRUCTION) {
1325 pr_err("instruction error\n"); 1379 pr_err("instruction error\n");
1326 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 1380 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
1381 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1382 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1327 if (INTEL_INFO(dev)->gen < 4) { 1383 if (INTEL_INFO(dev)->gen < 4) {
1328 u32 ipeir = I915_READ(IPEIR); 1384 u32 ipeir = I915_READ(IPEIR);
1329 1385
1330 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 1386 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
1331 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 1387 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
1332 pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
1333 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 1388 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
1334 I915_WRITE(IPEIR, ipeir); 1389 I915_WRITE(IPEIR, ipeir);
1335 POSTING_READ(IPEIR); 1390 POSTING_READ(IPEIR);
@@ -1338,10 +1393,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
1338 1393
1339 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1394 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1340 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1395 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1341 pr_err(" INSTDONE: 0x%08x\n",
1342 I915_READ(INSTDONE_I965));
1343 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1396 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1344 pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
1345 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1397 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1346 I915_WRITE(IPEIR_I965, ipeir); 1398 I915_WRITE(IPEIR_I965, ipeir);
1347 POSTING_READ(IPEIR_I965); 1399 POSTING_READ(IPEIR_I965);
@@ -1589,7 +1641,8 @@ ring_last_seqno(struct intel_ring_buffer *ring)
1589static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) 1641static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1590{ 1642{
1591 if (list_empty(&ring->request_list) || 1643 if (list_empty(&ring->request_list) ||
1592 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { 1644 i915_seqno_passed(ring->get_seqno(ring, false),
1645 ring_last_seqno(ring))) {
1593 /* Issue a wake-up to catch stuck h/w. */ 1646 /* Issue a wake-up to catch stuck h/w. */
1594 if (waitqueue_active(&ring->irq_queue)) { 1647 if (waitqueue_active(&ring->irq_queue)) {
1595 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 1648 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
@@ -1655,7 +1708,7 @@ void i915_hangcheck_elapsed(unsigned long data)
1655{ 1708{
1656 struct drm_device *dev = (struct drm_device *)data; 1709 struct drm_device *dev = (struct drm_device *)data;
1657 drm_i915_private_t *dev_priv = dev->dev_private; 1710 drm_i915_private_t *dev_priv = dev->dev_private;
1658 uint32_t acthd[I915_NUM_RINGS], instdone, instdone1; 1711 uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
1659 struct intel_ring_buffer *ring; 1712 struct intel_ring_buffer *ring;
1660 bool err = false, idle; 1713 bool err = false, idle;
1661 int i; 1714 int i;
@@ -1683,25 +1736,16 @@ void i915_hangcheck_elapsed(unsigned long data)
1683 return; 1736 return;
1684 } 1737 }
1685 1738
1686 if (INTEL_INFO(dev)->gen < 4) { 1739 i915_get_extra_instdone(dev, instdone);
1687 instdone = I915_READ(INSTDONE);
1688 instdone1 = 0;
1689 } else {
1690 instdone = I915_READ(INSTDONE_I965);
1691 instdone1 = I915_READ(INSTDONE1);
1692 }
1693
1694 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && 1740 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
1695 dev_priv->last_instdone == instdone && 1741 memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
1696 dev_priv->last_instdone1 == instdone1) {
1697 if (i915_hangcheck_hung(dev)) 1742 if (i915_hangcheck_hung(dev))
1698 return; 1743 return;
1699 } else { 1744 } else {
1700 dev_priv->hangcheck_count = 0; 1745 dev_priv->hangcheck_count = 0;
1701 1746
1702 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); 1747 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
1703 dev_priv->last_instdone = instdone; 1748 memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
1704 dev_priv->last_instdone1 = instdone1;
1705 } 1749 }
1706 1750
1707repeat: 1751repeat:
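With instdone now an array, the hangcheck above compares the whole snapshot with memcmp() and refreshes it with memcpy(); the GPU only counts as hung when neither ACTHD nor any INSTDONE register moved between samples. The comparison step, modelled stand-alone:

	#include <stdint.h>
	#include <string.h>

	#define NUM_INSTDONE 4

	/* Returns 1 if the sample changed (progress) and stores it,
	 * 0 if identical to the previous one (possibly hung). */
	static int sample_progressed(uint32_t prev[NUM_INSTDONE],
				     const uint32_t now[NUM_INSTDONE])
	{
		if (memcmp(prev, now, sizeof(uint32_t) * NUM_INSTDONE) == 0)
			return 0;

		memcpy(prev, now, sizeof(uint32_t) * NUM_INSTDONE);
		return 1;
	}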
@@ -2646,7 +2690,7 @@ void intel_irq_init(struct drm_device *dev)
2646 2690
2647 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 2691 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2648 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 2692 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2649 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); 2693 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
2650 INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); 2694 INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
2651 2695
2652 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2696 dev->driver->get_vblank_counter = i915_get_vblank_counter;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 28725ce5b82c..7637824c6a7d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -450,6 +450,7 @@
450#define RING_ACTHD(base) ((base)+0x74) 450#define RING_ACTHD(base) ((base)+0x74)
451#define RING_NOPID(base) ((base)+0x94) 451#define RING_NOPID(base) ((base)+0x94)
452#define RING_IMR(base) ((base)+0xa8) 452#define RING_IMR(base) ((base)+0xa8)
453#define RING_TIMESTAMP(base) ((base)+0x358)
453#define TAIL_ADDR 0x001FFFF8 454#define TAIL_ADDR 0x001FFFF8
454#define HEAD_WRAP_COUNT 0xFFE00000 455#define HEAD_WRAP_COUNT 0xFFE00000
455#define HEAD_WRAP_ONE 0x00200000 456#define HEAD_WRAP_ONE 0x00200000
@@ -478,6 +479,11 @@
478#define IPEIR_I965 0x02064 479#define IPEIR_I965 0x02064
479#define IPEHR_I965 0x02068 480#define IPEHR_I965 0x02068
480#define INSTDONE_I965 0x0206c 481#define INSTDONE_I965 0x0206c
482#define GEN7_INSTDONE_1 0x0206c
483#define GEN7_SC_INSTDONE 0x07100
484#define GEN7_SAMPLER_INSTDONE 0x0e160
485#define GEN7_ROW_INSTDONE 0x0e164
486#define I915_NUM_INSTDONE_REG 4
481#define RING_IPEIR(base) ((base)+0x64) 487#define RING_IPEIR(base) ((base)+0x64)
482#define RING_IPEHR(base) ((base)+0x68) 488#define RING_IPEHR(base) ((base)+0x68)
483#define RING_INSTDONE(base) ((base)+0x6c) 489#define RING_INSTDONE(base) ((base)+0x6c)
@@ -500,6 +506,8 @@
500#define DMA_FADD_I8XX 0x020d0 506#define DMA_FADD_I8XX 0x020d0
501 507
502#define ERROR_GEN6 0x040a0 508#define ERROR_GEN6 0x040a0
509#define GEN7_ERR_INT 0x44040
510#define ERR_INT_MMIO_UNCLAIMED (1<<13)
503 511
504/* GM45+ chicken bits -- debug workaround bits that may be required 512/* GM45+ chicken bits -- debug workaround bits that may be required
505 * for various sorts of correct behavior. The top 16 bits of each are 513 * for various sorts of correct behavior. The top 16 bits of each are
@@ -529,6 +537,8 @@
529#define GFX_PSMI_GRANULARITY (1<<10) 537#define GFX_PSMI_GRANULARITY (1<<10)
530#define GFX_PPGTT_ENABLE (1<<9) 538#define GFX_PPGTT_ENABLE (1<<9)
531 539
540#define VLV_DISPLAY_BASE 0x180000
541
532#define SCPD0 0x0209c /* 915+ only */ 542#define SCPD0 0x0209c /* 915+ only */
533#define IER 0x020a0 543#define IER 0x020a0
534#define IIR 0x020a4 544#define IIR 0x020a4
@@ -1496,6 +1506,14 @@
1496 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ 1506 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
1497 GEN7_CXT_GT1_SIZE(ctx_reg) + \ 1507 GEN7_CXT_GT1_SIZE(ctx_reg) + \
1498 GEN7_CXT_VFSTATE_SIZE(ctx_reg)) 1508 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
1509#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f)
1510#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7)
1511#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff)
1512#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \
1513 HSW_CXT_RING_SIZE(ctx_reg) + \
1514 HSW_CXT_RENDER_SIZE(ctx_reg) + \
1515 GEN7_CXT_VFSTATE_SIZE(ctx_reg))
1516
1499 1517
1500/* 1518/*
1501 * Overlay regs 1519 * Overlay regs
@@ -1549,12 +1567,35 @@
1549 1567
1550/* VGA port control */ 1568/* VGA port control */
1551#define ADPA 0x61100 1569#define ADPA 0x61100
1570#define PCH_ADPA 0xe1100
1571#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA)
1572
1552#define ADPA_DAC_ENABLE (1<<31) 1573#define ADPA_DAC_ENABLE (1<<31)
1553#define ADPA_DAC_DISABLE 0 1574#define ADPA_DAC_DISABLE 0
1554#define ADPA_PIPE_SELECT_MASK (1<<30) 1575#define ADPA_PIPE_SELECT_MASK (1<<30)
1555#define ADPA_PIPE_A_SELECT 0 1576#define ADPA_PIPE_A_SELECT 0
1556#define ADPA_PIPE_B_SELECT (1<<30) 1577#define ADPA_PIPE_B_SELECT (1<<30)
1557#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) 1578#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
1579/* CPT uses bits 30:29 for PCH transcoder select */
1580#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
1581#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
1582#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
1583#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
1584#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
1585#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
1586#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
1587#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
1588#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
1589#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
1590#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
1591#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
1592#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
1593#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
1594#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
1595#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
1596#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
1597#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
1598#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
1558#define ADPA_USE_VGA_HVPOLARITY (1<<15) 1599#define ADPA_USE_VGA_HVPOLARITY (1<<15)
1559#define ADPA_SETS_HVPOLARITY 0 1600#define ADPA_SETS_HVPOLARITY 0
1560#define ADPA_VSYNC_CNTL_DISABLE (1<<11) 1601#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
@@ -1753,6 +1794,10 @@
1753 1794
1754/* Video Data Island Packet control */ 1795/* Video Data Island Packet control */
1755#define VIDEO_DIP_DATA 0x61178 1796#define VIDEO_DIP_DATA 0x61178
1797/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
1798 * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
1799 * of the infoframe structure specified by CEA-861. */
1800#define VIDEO_DIP_DATA_SIZE 32
1756#define VIDEO_DIP_CTL 0x61170 1801#define VIDEO_DIP_CTL 0x61170
1757/* Pre HSW: */ 1802/* Pre HSW: */
1758#define VIDEO_DIP_ENABLE (1 << 31) 1803#define VIDEO_DIP_ENABLE (1 << 31)
@@ -3889,31 +3934,6 @@
3889#define FDI_PLL_CTL_1 0xfe000 3934#define FDI_PLL_CTL_1 0xfe000
3890#define FDI_PLL_CTL_2 0xfe004 3935#define FDI_PLL_CTL_2 0xfe004
3891 3936
3892/* CRT */
3893#define PCH_ADPA 0xe1100
3894#define ADPA_TRANS_SELECT_MASK (1<<30)
3895#define ADPA_TRANS_A_SELECT 0
3896#define ADPA_TRANS_B_SELECT (1<<30)
3897#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
3898#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
3899#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
3900#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
3901#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
3902#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
3903#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
3904#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
3905#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
3906#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
3907#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
3908#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
3909#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
3910#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
3911#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
3912#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
3913#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
3914#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
3915#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
3916
3917/* or SDVOB */ 3937/* or SDVOB */
3918#define HDMIB 0xe1140 3938#define HDMIB 0xe1140
3919#define PORT_ENABLE (1 << 31) 3939#define PORT_ENABLE (1 << 31)
@@ -4021,6 +4041,8 @@
4021#define PORT_TRANS_C_SEL_CPT (2<<29) 4041#define PORT_TRANS_C_SEL_CPT (2<<29)
4022#define PORT_TRANS_SEL_MASK (3<<29) 4042#define PORT_TRANS_SEL_MASK (3<<29)
4023#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29) 4043#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
4044#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
4045#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
4024 4046
4025#define TRANS_DP_CTL_A 0xe0300 4047#define TRANS_DP_CTL_A 0xe0300
4026#define TRANS_DP_CTL_B 0xe1300 4048#define TRANS_DP_CTL_B 0xe1300
@@ -4239,7 +4261,15 @@
4239#define G4X_HDMIW_HDMIEDID 0x6210C 4261#define G4X_HDMIW_HDMIEDID 0x6210C
4240 4262
4241#define IBX_HDMIW_HDMIEDID_A 0xE2050 4263#define IBX_HDMIW_HDMIEDID_A 0xE2050
4264#define IBX_HDMIW_HDMIEDID_B 0xE2150
4265#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
4266 IBX_HDMIW_HDMIEDID_A, \
4267 IBX_HDMIW_HDMIEDID_B)
4242#define IBX_AUD_CNTL_ST_A 0xE20B4 4268#define IBX_AUD_CNTL_ST_A 0xE20B4
4269#define IBX_AUD_CNTL_ST_B 0xE21B4
4270#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
4271 IBX_AUD_CNTL_ST_A, \
4272 IBX_AUD_CNTL_ST_B)
4243#define IBX_ELD_BUFFER_SIZE (0x1f << 10) 4273#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
4244#define IBX_ELD_ADDRESS (0x1f << 5) 4274#define IBX_ELD_ADDRESS (0x1f << 5)
4245#define IBX_ELD_ACK (1 << 4) 4275#define IBX_ELD_ACK (1 << 4)
@@ -4248,7 +4278,15 @@
4248#define IBX_CP_READYB (1 << 1) 4278#define IBX_CP_READYB (1 << 1)
4249 4279
4250#define CPT_HDMIW_HDMIEDID_A 0xE5050 4280#define CPT_HDMIW_HDMIEDID_A 0xE5050
4281#define CPT_HDMIW_HDMIEDID_B 0xE5150
4282#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
4283 CPT_HDMIW_HDMIEDID_A, \
4284 CPT_HDMIW_HDMIEDID_B)
4251#define CPT_AUD_CNTL_ST_A 0xE50B4 4285#define CPT_AUD_CNTL_ST_A 0xE50B4
4286#define CPT_AUD_CNTL_ST_B 0xE51B4
4287#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
4288 CPT_AUD_CNTL_ST_A, \
4289 CPT_AUD_CNTL_ST_B)
4252#define CPT_AUD_CNTRL_ST2 0xE50C0 4290#define CPT_AUD_CNTRL_ST2 0xE50C0
4253 4291
4254/* These are the 4 32-bit write offset registers for each stream 4292/* These are the 4 32-bit write offset registers for each stream
@@ -4258,7 +4296,15 @@
4258#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) 4296#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
4259 4297
4260#define IBX_AUD_CONFIG_A 0xe2000 4298#define IBX_AUD_CONFIG_A 0xe2000
4299#define IBX_AUD_CONFIG_B 0xe2100
4300#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
4301 IBX_AUD_CONFIG_A, \
4302 IBX_AUD_CONFIG_B)
4261#define CPT_AUD_CONFIG_A 0xe5000 4303#define CPT_AUD_CONFIG_A 0xe5000
4304#define CPT_AUD_CONFIG_B 0xe5100
4305#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
4306 CPT_AUD_CONFIG_A, \
4307 CPT_AUD_CONFIG_B)
4262#define AUD_CONFIG_N_VALUE_INDEX (1 << 29) 4308#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
4263#define AUD_CONFIG_N_PROG_ENABLE (1 << 28) 4309#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
4264#define AUD_CONFIG_UPPER_N_SHIFT 20 4310#define AUD_CONFIG_UPPER_N_SHIFT 20
@@ -4269,195 +4315,233 @@
4269#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) 4315#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
4270#define AUD_CONFIG_DISABLE_NCTS (1 << 3) 4316#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
4271 4317
4318/* HSW Audio */
4319#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */
4320#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */
4321#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
4322 HSW_AUD_CONFIG_A, \
4323 HSW_AUD_CONFIG_B)
4324
4325#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Converter 1 */
4326#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Converter 2 */
4327#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
4328 HSW_AUD_MISC_CTRL_A, \
4329 HSW_AUD_MISC_CTRL_B)
4330
4331#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */
4332#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */
4333#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
4334 HSW_AUD_DIP_ELD_CTRL_ST_A, \
4335 HSW_AUD_DIP_ELD_CTRL_ST_B)
4336
4337/* Audio Digital Converter */
4338#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */
4339#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 2 */
4340#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
4341 HSW_AUD_DIG_CNVT_1, \
4342 HSW_AUD_DIG_CNVT_2)
4343#define DIP_PORT_SEL_MASK 0x3
4344
4345#define HSW_AUD_EDID_DATA_A 0x65050
4346#define HSW_AUD_EDID_DATA_B 0x65150
4347#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
4348 HSW_AUD_EDID_DATA_A, \
4349 HSW_AUD_EDID_DATA_B)
4350
4351#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */
4352#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */
4353#define AUDIO_INACTIVE_C (1<<11)
4354#define AUDIO_INACTIVE_B (1<<7)
4355#define AUDIO_INACTIVE_A (1<<3)
4356#define AUDIO_OUTPUT_ENABLE_A (1<<2)
4357#define AUDIO_OUTPUT_ENABLE_B (1<<6)
4358#define AUDIO_OUTPUT_ENABLE_C (1<<10)
4359#define AUDIO_ELD_VALID_A (1<<0)
4360#define AUDIO_ELD_VALID_B (1<<4)
4361#define AUDIO_ELD_VALID_C (1<<8)
4362#define AUDIO_CP_READY_A (1<<1)
4363#define AUDIO_CP_READY_B (1<<5)
4364#define AUDIO_CP_READY_C (1<<9)
4365
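The new audio defines above all lean on the driver's _PIPE(pipe, a, b) pairing convention, which assumes the B instance sits at a fixed offset from A and computes the address by linear interpolation. A stand-alone model of the macro and one pairing (addresses copied from the defines above; MY_* names are illustrative):

	#define MY_PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))

	#define MY_EDID_A		0xE2050
	#define MY_EDID_B		0xE2150
	#define MY_EDID(pipe)		MY_PIPE(pipe, MY_EDID_A, MY_EDID_B)

	/* MY_EDID(0) == 0xE2050 (pipe A), MY_EDID(1) == 0xE2150 (pipe B) */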
4272/* HSW Power Wells */ 4366/* HSW Power Wells */
4273#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */ 4367#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
4274#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */ 4368#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
4275#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */ 4369#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
4276#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */ 4370#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
4277#define HSW_PWR_WELL_ENABLE (1<<31) 4371#define HSW_PWR_WELL_ENABLE (1<<31)
4278#define HSW_PWR_WELL_STATE (1<<30) 4372#define HSW_PWR_WELL_STATE (1<<30)
4279#define HSW_PWR_WELL_CTL5 0x45410 4373#define HSW_PWR_WELL_CTL5 0x45410
4280#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) 4374#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
4281#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) 4375#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
4282#define HSW_PWR_WELL_FORCE_ON (1<<19) 4376#define HSW_PWR_WELL_FORCE_ON (1<<19)
4283#define HSW_PWR_WELL_CTL6 0x45414 4377#define HSW_PWR_WELL_CTL6 0x45414
4284 4378
4285/* Per-pipe DDI Function Control */ 4379/* Per-pipe DDI Function Control */
4286#define PIPE_DDI_FUNC_CTL_A 0x60400 4380#define PIPE_DDI_FUNC_CTL_A 0x60400
4287#define PIPE_DDI_FUNC_CTL_B 0x61400 4381#define PIPE_DDI_FUNC_CTL_B 0x61400
4288#define PIPE_DDI_FUNC_CTL_C 0x62400 4382#define PIPE_DDI_FUNC_CTL_C 0x62400
4289#define PIPE_DDI_FUNC_CTL_EDP 0x6F400 4383#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
4290#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \ 4384#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
4291 PIPE_DDI_FUNC_CTL_A, \ 4385 PIPE_DDI_FUNC_CTL_B)
4292 PIPE_DDI_FUNC_CTL_B)
4293#define PIPE_DDI_FUNC_ENABLE (1<<31) 4386#define PIPE_DDI_FUNC_ENABLE (1<<31)
4294/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ 4387/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
4295#define PIPE_DDI_PORT_MASK (7<<28) 4388#define PIPE_DDI_PORT_MASK (7<<28)
4296#define PIPE_DDI_SELECT_PORT(x) ((x)<<28) 4389#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
4297#define PIPE_DDI_MODE_SELECT_HDMI (0<<24) 4390#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
4298#define PIPE_DDI_MODE_SELECT_DVI (1<<24) 4391#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
4392#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
4299#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24) 4393#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
4300#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24) 4394#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
4301#define PIPE_DDI_MODE_SELECT_FDI (4<<24) 4395#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
4302#define PIPE_DDI_BPC_8 (0<<20) 4396#define PIPE_DDI_BPC_MASK (7<<20)
4303#define PIPE_DDI_BPC_10 (1<<20) 4397#define PIPE_DDI_BPC_8 (0<<20)
4304#define PIPE_DDI_BPC_6 (2<<20) 4398#define PIPE_DDI_BPC_10 (1<<20)
4305#define PIPE_DDI_BPC_12 (3<<20) 4399#define PIPE_DDI_BPC_6 (2<<20)
4306#define PIPE_DDI_BFI_ENABLE (1<<4) 4400#define PIPE_DDI_BPC_12 (3<<20)
4307#define PIPE_DDI_PORT_WIDTH_X1 (0<<1) 4401#define PIPE_DDI_PVSYNC (1<<17)
4308#define PIPE_DDI_PORT_WIDTH_X2 (1<<1) 4402#define PIPE_DDI_PHSYNC (1<<16)
4309#define PIPE_DDI_PORT_WIDTH_X4 (3<<1) 4403#define PIPE_DDI_BFI_ENABLE (1<<4)
4404#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
4405#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
4406#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
4310 4407
4311/* DisplayPort Transport Control */ 4408/* DisplayPort Transport Control */
4312#define DP_TP_CTL_A 0x64040 4409#define DP_TP_CTL_A 0x64040
4313#define DP_TP_CTL_B 0x64140 4410#define DP_TP_CTL_B 0x64140
4314#define DP_TP_CTL(port) _PORT(port, \ 4411#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
4315 DP_TP_CTL_A, \ 4412#define DP_TP_CTL_ENABLE (1<<31)
4316 DP_TP_CTL_B) 4413#define DP_TP_CTL_MODE_SST (0<<27)
4317#define DP_TP_CTL_ENABLE (1<<31) 4414#define DP_TP_CTL_MODE_MST (1<<27)
4318#define DP_TP_CTL_MODE_SST (0<<27)
4319#define DP_TP_CTL_MODE_MST (1<<27)
4320#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18) 4415#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
4321#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15) 4416#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
4322#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) 4417#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
4323#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) 4418#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
4324#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) 4419#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
4325#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) 4420#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
4326 4421
4327/* DisplayPort Transport Status */ 4422/* DisplayPort Transport Status */
4328#define DP_TP_STATUS_A 0x64044 4423#define DP_TP_STATUS_A 0x64044
4329#define DP_TP_STATUS_B 0x64144 4424#define DP_TP_STATUS_B 0x64144
4330#define DP_TP_STATUS(port) _PORT(port, \ 4425#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
4331 DP_TP_STATUS_A, \
4332 DP_TP_STATUS_B)
4333#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) 4426#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
4334 4427
4335/* DDI Buffer Control */ 4428/* DDI Buffer Control */
4336#define DDI_BUF_CTL_A 0x64000 4429#define DDI_BUF_CTL_A 0x64000
4337#define DDI_BUF_CTL_B 0x64100 4430#define DDI_BUF_CTL_B 0x64100
4338#define DDI_BUF_CTL(port) _PORT(port, \ 4431#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
4339 DDI_BUF_CTL_A, \ 4432#define DDI_BUF_CTL_ENABLE (1<<31)
4340 DDI_BUF_CTL_B)
4341#define DDI_BUF_CTL_ENABLE (1<<31)
4342#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ 4433#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
4343#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */ 4434#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
4344#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */ 4435#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
4345#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */ 4436#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
4346#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */ 4437#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
4347#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */ 4438#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
4348#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */ 4439#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
4349#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ 4440#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
4350#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ 4441#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
4351#define DDI_BUF_EMP_MASK (0xf<<24) 4442#define DDI_BUF_EMP_MASK (0xf<<24)
4352#define DDI_BUF_IS_IDLE (1<<7) 4443#define DDI_BUF_IS_IDLE (1<<7)
4353#define DDI_PORT_WIDTH_X1 (0<<1) 4444#define DDI_PORT_WIDTH_X1 (0<<1)
4354#define DDI_PORT_WIDTH_X2 (1<<1) 4445#define DDI_PORT_WIDTH_X2 (1<<1)
4355#define DDI_PORT_WIDTH_X4 (3<<1) 4446#define DDI_PORT_WIDTH_X4 (3<<1)
4356#define DDI_INIT_DISPLAY_DETECTED (1<<0) 4447#define DDI_INIT_DISPLAY_DETECTED (1<<0)
4357 4448
4358/* DDI Buffer Translations */ 4449/* DDI Buffer Translations */
4359#define DDI_BUF_TRANS_A 0x64E00 4450#define DDI_BUF_TRANS_A 0x64E00
4360#define DDI_BUF_TRANS_B 0x64E60 4451#define DDI_BUF_TRANS_B 0x64E60
4361#define DDI_BUF_TRANS(port) _PORT(port, \ 4452#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
4362 DDI_BUF_TRANS_A, \
4363 DDI_BUF_TRANS_B)
4364 4453
4365/* Sideband Interface (SBI) is programmed indirectly, via 4454/* Sideband Interface (SBI) is programmed indirectly, via
4366 * SBI_ADDR, which contains the register offset; and SBI_DATA, 4455 * SBI_ADDR, which contains the register offset; and SBI_DATA,
4367 * which contains the payload */ 4456 * which contains the payload */
4368#define SBI_ADDR 0xC6000 4457#define SBI_ADDR 0xC6000
4369#define SBI_DATA 0xC6004 4458#define SBI_DATA 0xC6004
4370#define SBI_CTL_STAT 0xC6008 4459#define SBI_CTL_STAT 0xC6008
4371#define SBI_CTL_OP_CRRD (0x6<<8) 4460#define SBI_CTL_OP_CRRD (0x6<<8)
4372#define SBI_CTL_OP_CRWR (0x7<<8) 4461#define SBI_CTL_OP_CRWR (0x7<<8)
4373#define SBI_RESPONSE_FAIL (0x1<<1) 4462#define SBI_RESPONSE_FAIL (0x1<<1)
4374#define SBI_RESPONSE_SUCCESS (0x0<<1) 4463#define SBI_RESPONSE_SUCCESS (0x0<<1)
4375#define SBI_BUSY (0x1<<0) 4464#define SBI_BUSY (0x1<<0)
4376#define SBI_READY (0x0<<0) 4465#define SBI_READY (0x0<<0)
4377 4466
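The comment above describes the indirect programming model; here is a hedged sketch of a cross-request read (CRRD) built only from the registers defined in this hunk. The address shift, polling loop, and lack of timeout are assumptions for illustration — the real driver wraps this in locking and bounded waits.

/* Sketch of an indirect SBI read, under the assumptions above. */
static u32 sbi_read_sketch(struct drm_i915_private *dev_priv, u16 offset)
{
	I915_WRITE(SBI_ADDR, (u32)offset << 16);	/* target register */
	I915_WRITE(SBI_CTL_STAT, SBI_CTL_OP_CRRD | SBI_BUSY); /* start read */

	/* Poll until the interface reports ready (bounded in real code). */
	while (I915_READ(SBI_CTL_STAT) & SBI_BUSY)
		cpu_relax();

	return I915_READ(SBI_DATA);			/* payload */
}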
4378/* SBI offsets */ 4467/* SBI offsets */
4379#define SBI_SSCDIVINTPHASE6 0x0600 4468#define SBI_SSCDIVINTPHASE6 0x0600
4380#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1) 4469#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
4381#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) 4470#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
4382#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8) 4471#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
4383#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) 4472#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
4384#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) 4473#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
4385#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) 4474#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
4386#define SBI_SSCCTL 0x020c 4475#define SBI_SSCCTL 0x020c
4387#define SBI_SSCCTL6 0x060C 4476#define SBI_SSCCTL6 0x060C
4388#define SBI_SSCCTL_DISABLE (1<<0) 4477#define SBI_SSCCTL_DISABLE (1<<0)
4389#define SBI_SSCAUXDIV6 0x0610 4478#define SBI_SSCAUXDIV6 0x0610
4390#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) 4479#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
4391#define SBI_DBUFF0 0x2a00 4480#define SBI_DBUFF0 0x2a00
4392 4481
4393/* LPT PIXCLK_GATE */ 4482/* LPT PIXCLK_GATE */
4394#define PIXCLK_GATE 0xC6020 4483#define PIXCLK_GATE 0xC6020
4395#define PIXCLK_GATE_UNGATE 1<<0 4484#define PIXCLK_GATE_UNGATE (1<<0)
4396#define PIXCLK_GATE_GATE 0<<0 4485#define PIXCLK_GATE_GATE (0<<0)
4397 4486
4398/* SPLL */ 4487/* SPLL */
4399#define SPLL_CTL 0x46020 4488#define SPLL_CTL 0x46020
4400#define SPLL_PLL_ENABLE (1<<31) 4489#define SPLL_PLL_ENABLE (1<<31)
4401#define SPLL_PLL_SCC (1<<28) 4490#define SPLL_PLL_SCC (1<<28)
4402#define SPLL_PLL_NON_SCC (2<<28) 4491#define SPLL_PLL_NON_SCC (2<<28)
4403#define SPLL_PLL_FREQ_810MHz (0<<26) 4492#define SPLL_PLL_FREQ_810MHz (0<<26)
4404#define SPLL_PLL_FREQ_1350MHz (1<<26) 4493#define SPLL_PLL_FREQ_1350MHz (1<<26)
4405 4494
4406/* WRPLL */ 4495/* WRPLL */
4407#define WRPLL_CTL1 0x46040 4496#define WRPLL_CTL1 0x46040
4408#define WRPLL_CTL2 0x46060 4497#define WRPLL_CTL2 0x46060
4409#define WRPLL_PLL_ENABLE (1<<31) 4498#define WRPLL_PLL_ENABLE (1<<31)
4410#define WRPLL_PLL_SELECT_SSC (0x01<<28) 4499#define WRPLL_PLL_SELECT_SSC (0x01<<28)
4411#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) 4500#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
4412#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) 4501#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
4413/* WRPLL divider programming */ 4502/* WRPLL divider programming */
4414#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) 4503#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
4415#define WRPLL_DIVIDER_POST(x) ((x)<<8) 4504#define WRPLL_DIVIDER_POST(x) ((x)<<8)
4416#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) 4505#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
4417 4506
4418/* Port clock selection */ 4507/* Port clock selection */
4419#define PORT_CLK_SEL_A 0x46100 4508#define PORT_CLK_SEL_A 0x46100
4420#define PORT_CLK_SEL_B 0x46104 4509#define PORT_CLK_SEL_B 0x46104
4421#define PORT_CLK_SEL(port) _PORT(port, \ 4510#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
4422 PORT_CLK_SEL_A, \
4423 PORT_CLK_SEL_B)
4424#define PORT_CLK_SEL_LCPLL_2700 (0<<29) 4511#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
4425#define PORT_CLK_SEL_LCPLL_1350 (1<<29) 4512#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
4426#define PORT_CLK_SEL_LCPLL_810 (2<<29) 4513#define PORT_CLK_SEL_LCPLL_810 (2<<29)
4427#define PORT_CLK_SEL_SPLL (3<<29) 4514#define PORT_CLK_SEL_SPLL (3<<29)
4428#define PORT_CLK_SEL_WRPLL1 (4<<29) 4515#define PORT_CLK_SEL_WRPLL1 (4<<29)
4429#define PORT_CLK_SEL_WRPLL2 (5<<29) 4516#define PORT_CLK_SEL_WRPLL2 (5<<29)
4430 4517
4431/* Pipe clock selection */ 4518/* Pipe clock selection */
4432#define PIPE_CLK_SEL_A 0x46140 4519#define PIPE_CLK_SEL_A 0x46140
4433#define PIPE_CLK_SEL_B 0x46144 4520#define PIPE_CLK_SEL_B 0x46144
4434#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \ 4521#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
4435 PIPE_CLK_SEL_A, \
4436 PIPE_CLK_SEL_B)
4437/* For each pipe, we need to select the corresponding port clock */ 4522/* For each pipe, we need to select the corresponding port clock */
4438#define PIPE_CLK_SEL_DISABLED (0x0<<29) 4523#define PIPE_CLK_SEL_DISABLED (0x0<<29)
4437#define PIPE_CLK_SEL_PORT(x) (((x)+1)<<29) 4524#define PIPE_CLK_SEL_PORT(x) (((x)+1)<<29)
4440 4525
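Per the comment, each pipe's clock must track the port it drives; a plausible pair of writes, assuming pipe A driven by port B (the off-by-one encoding is what PIPE_CLK_SEL_PORT hides):

/* Route pipe A's clock to port B; PIPE_CLK_SEL_PORT(x) encodes port x
 * as (x + 1) so that 0 remains "disabled". */
I915_WRITE(PIPE_CLK_SEL(PIPE_A), PIPE_CLK_SEL_PORT(PORT_B));

/* And on teardown: */
I915_WRITE(PIPE_CLK_SEL(PIPE_A), PIPE_CLK_SEL_DISABLED);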
4441/* LCPLL Control */ 4526/* LCPLL Control */
4442#define LCPLL_CTL 0x130040 4527#define LCPLL_CTL 0x130040
4443#define LCPLL_PLL_DISABLE (1<<31) 4528#define LCPLL_PLL_DISABLE (1<<31)
4444#define LCPLL_PLL_LOCK (1<<30) 4529#define LCPLL_PLL_LOCK (1<<30)
4445#define LCPLL_CD_CLOCK_DISABLE (1<<25) 4530#define LCPLL_CD_CLOCK_DISABLE (1<<25)
4446#define LCPLL_CD2X_CLOCK_DISABLE (1<<23) 4531#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
4447 4532
4448/* Pipe WM_LINETIME - watermark line time */ 4533/* Pipe WM_LINETIME - watermark line time */
4449#define PIPE_WM_LINETIME_A 0x45270 4534#define PIPE_WM_LINETIME_A 0x45270
4450#define PIPE_WM_LINETIME_B 0x45274 4535#define PIPE_WM_LINETIME_B 0x45274
4451#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \ 4536#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
4452 PIPE_WM_LINETIME_A, \ 4537 PIPE_WM_LINETIME_B)
4453 PIPE_WM_LINETIME_B) 4538#define PIPE_WM_LINETIME_MASK (0x1ff)
4454#define PIPE_WM_LINETIME_MASK (0x1ff) 4539#define PIPE_WM_LINETIME_TIME(x) ((x))
4455#define PIPE_WM_LINETIME_TIME(x) ((x))
4456#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16) 4540#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
4457#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16) 4541#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
4458 4542
4459/* SFUSE_STRAP */ 4543/* SFUSE_STRAP */
4460#define SFUSE_STRAP 0xc2014 4544#define SFUSE_STRAP 0xc2014
4461#define SFUSE_STRAP_DDIB_DETECTED (1<<2) 4545#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
4462#define SFUSE_STRAP_DDIC_DETECTED (1<<1) 4546#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
4463#define SFUSE_STRAP_DDID_DETECTED (1<<0) 4547#define SFUSE_STRAP_DDID_DETECTED (1<<0)
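A short hedged example of how the strap register might be consumed during output probing (the surrounding setup logic is an assumption; intel_ddi_init() is the entry point shown later in this patch):

u32 straps = I915_READ(SFUSE_STRAP);

if (straps & SFUSE_STRAP_DDIB_DETECTED)
	intel_ddi_init(dev, PORT_B);
if (straps & SFUSE_STRAP_DDIC_DETECTED)
	intel_ddi_init(dev, PORT_C);
if (straps & SFUSE_STRAP_DDID_DETECTED)
	intel_ddi_init(dev, PORT_D);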
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 7631807a2788..903eebd2117a 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -46,32 +46,32 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
46} 46}
47 47
48static ssize_t 48static ssize_t
49show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf) 49show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
50{ 50{
51 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 51 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
52 return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev)); 52 return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
53} 53}
54 54
55static ssize_t 55static ssize_t
56show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf) 56show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
57{ 57{
58 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 58 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
59 u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); 59 u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
60 return snprintf(buf, PAGE_SIZE, "%u", rc6_residency); 60 return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
61} 61}
62 62
63static ssize_t 63static ssize_t
64show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf) 64show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
65{ 65{
66 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 66 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
67 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); 67 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
68 return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency); 68 return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
69} 69}
70 70
71static ssize_t 71static ssize_t
72show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf) 72show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
73{ 73{
74 struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); 74 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
75 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); 75 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
76 return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency); 76 return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
77} 77}
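The rename from dev to kdev matters: these callbacks receive the sysfs struct device embedded inside drm_minor, not a drm_device, and the old name invited confusing the two. container_of() walks back to the enclosing structure:

/* The sysfs core hands us the embedded struct device; recovering the
 * containing drm_minor is pure pointer arithmetic: */
struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
struct drm_device *drm_dev = dminor->dev;	/* the actual DRM device */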
@@ -93,6 +93,7 @@ static struct attribute_group rc6_attr_group = {
93 .name = power_group_name, 93 .name = power_group_name,
94 .attrs = rc6_attrs 94 .attrs = rc6_attrs
95}; 95};
96#endif
96 97
97static int l3_access_valid(struct drm_device *dev, loff_t offset) 98static int l3_access_valid(struct drm_device *dev, loff_t offset)
98{ 99{
@@ -202,37 +203,214 @@ static struct bin_attribute dpf_attrs = {
202 .mmap = NULL 203 .mmap = NULL
203}; 204};
204 205
206static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
207 struct device_attribute *attr, char *buf)
208{
209 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
210 struct drm_device *dev = minor->dev;
211 struct drm_i915_private *dev_priv = dev->dev_private;
212 int ret;
213
214 ret = i915_mutex_lock_interruptible(dev);
215 if (ret)
216 return ret;
217
218 ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
219 mutex_unlock(&dev->struct_mutex);
220
221 return snprintf(buf, PAGE_SIZE, "%d", ret);
222}
223
224static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
225{
226 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
227 struct drm_device *dev = minor->dev;
228 struct drm_i915_private *dev_priv = dev->dev_private;
229 int ret;
230
231 ret = i915_mutex_lock_interruptible(dev);
232 if (ret)
233 return ret;
234
235 ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
236 mutex_unlock(&dev->struct_mutex);
237
238 return snprintf(buf, PAGE_SIZE, "%d", ret);
239}
240
241static ssize_t gt_max_freq_mhz_store(struct device *kdev,
242 struct device_attribute *attr,
243 const char *buf, size_t count)
244{
245 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
246 struct drm_device *dev = minor->dev;
247 struct drm_i915_private *dev_priv = dev->dev_private;
248 u32 val, rp_state_cap, hw_max, hw_min;
249 ssize_t ret;
250
251 ret = kstrtou32(buf, 0, &val);
252 if (ret)
253 return ret;
254
255 val /= GT_FREQUENCY_MULTIPLIER;
256
257 ret = mutex_lock_interruptible(&dev->struct_mutex);
258 if (ret)
259 return ret;
260
261 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
262 hw_max = (rp_state_cap & 0xff);
263 hw_min = ((rp_state_cap & 0xff0000) >> 16);
264
265 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
266 mutex_unlock(&dev->struct_mutex);
267 return -EINVAL;
268 }
269
270 if (dev_priv->rps.cur_delay > val)
271 gen6_set_rps(dev_priv->dev, val);
272
273 dev_priv->rps.max_delay = val;
274
275 mutex_unlock(&dev->struct_mutex);
276
277 return count;
278}
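A worked example of the clamping above, assuming GT_FREQUENCY_MULTIPLIER is 50 (its usual value for these parts): userspace echoing "1100" into gt_max_freq_mhz yields

u32 val = 1100 / 50;	/* = 22 hardware delay units */
/* GEN6_RP_STATE_CAP: bits 7:0 = RP0 (hw_max), bits 23:16 = RPn (hw_min).
 * The write succeeds only if hw_min <= 22 <= hw_max and
 * 22 >= dev_priv->rps.min_delay; if the GPU is currently running above
 * the new cap, gen6_set_rps() pulls it down before max_delay is stored. */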
279
280static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
281{
282 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
283 struct drm_device *dev = minor->dev;
284 struct drm_i915_private *dev_priv = dev->dev_private;
285 int ret;
286
287 ret = i915_mutex_lock_interruptible(dev);
288 if (ret)
289 return ret;
290
291 ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
292 mutex_unlock(&dev->struct_mutex);
293
294 return snprintf(buf, PAGE_SIZE, "%d", ret);
295}
296
297static ssize_t gt_min_freq_mhz_store(struct device *kdev,
298 struct device_attribute *attr,
299 const char *buf, size_t count)
300{
301 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
302 struct drm_device *dev = minor->dev;
303 struct drm_i915_private *dev_priv = dev->dev_private;
304 u32 val, rp_state_cap, hw_max, hw_min;
305 ssize_t ret;
306
307 ret = kstrtou32(buf, 0, &val);
308 if (ret)
309 return ret;
310
311 val /= GT_FREQUENCY_MULTIPLIER;
312
313 ret = mutex_lock_interruptible(&dev->struct_mutex);
314 if (ret)
315 return ret;
316
317 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
318 hw_max = (rp_state_cap & 0xff);
319 hw_min = ((rp_state_cap & 0xff0000) >> 16);
320
321 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
322 mutex_unlock(&dev->struct_mutex);
323 return -EINVAL;
324 }
325
326 if (dev_priv->rps.cur_delay < val)
327 gen6_set_rps(dev_priv->dev, val);
328
329 dev_priv->rps.min_delay = val;
330
331 mutex_unlock(&dev->struct_mutex);
332
333 return count;
334
335}
336
337static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
338static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
339static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
340
341
342static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
343static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
344static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
345static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
346
347/* For now we have a static number of RP states */
348static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
349{
350 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
351 struct drm_device *dev = minor->dev;
352 struct drm_i915_private *dev_priv = dev->dev_private;
353 u32 val, rp_state_cap;
354 ssize_t ret;
355
356 ret = mutex_lock_interruptible(&dev->struct_mutex);
357 if (ret)
358 return ret;
359 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
360 mutex_unlock(&dev->struct_mutex);
361
362 if (attr == &dev_attr_gt_RP0_freq_mhz) {
363 val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
364 } else if (attr == &dev_attr_gt_RP1_freq_mhz) {
365 val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
366 } else if (attr == &dev_attr_gt_RPn_freq_mhz) {
367 val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
368 } else {
369 BUG();
370 }
371 return snprintf(buf, PAGE_SIZE, "%d", val);
372}
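For illustration, a hypothetical rp_state_cap value decoded exactly as the show function does (the register value is made up; the multiplier of 50 is an assumption):

/* Hypothetical rp_state_cap = 0x00080C16, GT_FREQUENCY_MULTIPLIER = 50: */
u32 rp0 = ((0x00080C16 & 0x0000ff) >>  0) * 50;	/* 0x16 = 22 -> 1100 MHz */
u32 rp1 = ((0x00080C16 & 0x00ff00) >>  8) * 50;	/* 0x0C = 12 ->  600 MHz */
u32 rpn = ((0x00080C16 & 0xff0000) >> 16) * 50;	/* 0x08 =  8 ->  400 MHz */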
373
374static const struct attribute *gen6_attrs[] = {
375 &dev_attr_gt_cur_freq_mhz.attr,
376 &dev_attr_gt_max_freq_mhz.attr,
377 &dev_attr_gt_min_freq_mhz.attr,
378 &dev_attr_gt_RP0_freq_mhz.attr,
379 &dev_attr_gt_RP1_freq_mhz.attr,
380 &dev_attr_gt_RPn_freq_mhz.attr,
381 NULL,
382};
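Once attached to the minor's kobject these attributes appear under the card's sysfs directory (typically /sys/class/drm/card0/). A hedged userspace sketch that reads the current frequency and raises the cap — the path and required privileges are assumptions:

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/drm/card0/gt_cur_freq_mhz", "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("current GT frequency: %s MHz\n", buf);
	if (f)
		fclose(f);

	/* Writing gt_max_freq_mhz requires root (S_IWUSR above). */
	f = fopen("/sys/class/drm/card0/gt_max_freq_mhz", "w");
	if (f) {
		fputs("1100", f);
		fclose(f);
	}
	return 0;
}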
383
205void i915_setup_sysfs(struct drm_device *dev) 384void i915_setup_sysfs(struct drm_device *dev)
206{ 385{
207 int ret; 386 int ret;
208 387
388#ifdef CONFIG_PM
209 if (INTEL_INFO(dev)->gen >= 6) { 389 if (INTEL_INFO(dev)->gen >= 6) {
210 ret = sysfs_merge_group(&dev->primary->kdev.kobj, 390 ret = sysfs_merge_group(&dev->primary->kdev.kobj,
211 &rc6_attr_group); 391 &rc6_attr_group);
212 if (ret) 392 if (ret)
213 DRM_ERROR("RC6 residency sysfs setup failed\n"); 393 DRM_ERROR("RC6 residency sysfs setup failed\n");
214 } 394 }
215 395#endif
216 if (IS_IVYBRIDGE(dev)) { 396 if (HAS_L3_GPU_CACHE(dev)) {
217 ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); 397 ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
218 if (ret) 398 if (ret)
219 DRM_ERROR("l3 parity sysfs setup failed\n"); 399 DRM_ERROR("l3 parity sysfs setup failed\n");
220 } 400 }
401
402 if (INTEL_INFO(dev)->gen >= 6) {
403 ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
404 if (ret)
405 DRM_ERROR("gen6 sysfs setup failed\n");
406 }
221} 407}
222 408
223void i915_teardown_sysfs(struct drm_device *dev) 409void i915_teardown_sysfs(struct drm_device *dev)
224{ 410{
411 sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
225 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); 412 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
413#ifdef CONFIG_PM
226 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); 414 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
415#endif
227} 416}
228#else
229void i915_setup_sysfs(struct drm_device *dev)
230{
231 return;
232}
233
234void i915_teardown_sysfs(struct drm_device *dev)
235{
236 return;
237}
238#endif /* CONFIG_PM */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fe90b3a84a6d..8134421b89a6 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -214,22 +214,18 @@ TRACE_EVENT(i915_gem_evict,
214); 214);
215 215
216TRACE_EVENT(i915_gem_evict_everything, 216TRACE_EVENT(i915_gem_evict_everything,
217 TP_PROTO(struct drm_device *dev, bool purgeable), 217 TP_PROTO(struct drm_device *dev),
218 TP_ARGS(dev, purgeable), 218 TP_ARGS(dev),
219 219
220 TP_STRUCT__entry( 220 TP_STRUCT__entry(
221 __field(u32, dev) 221 __field(u32, dev)
222 __field(bool, purgeable)
223 ), 222 ),
224 223
225 TP_fast_assign( 224 TP_fast_assign(
226 __entry->dev = dev->primary->index; 225 __entry->dev = dev->primary->index;
227 __entry->purgeable = purgeable;
228 ), 226 ),
229 227
230 TP_printk("dev=%d%s", 228 TP_printk("dev=%d", __entry->dev)
231 __entry->dev,
232 __entry->purgeable ? ", purgeable only" : "")
233); 229);
234 230
235TRACE_EVENT(i915_gem_ring_dispatch, 231TRACE_EVENT(i915_gem_ring_dispatch,
@@ -434,6 +430,21 @@ TRACE_EVENT(i915_reg_rw,
434 (u32)(__entry->val >> 32)) 430 (u32)(__entry->val >> 32))
435); 431);
436 432
433TRACE_EVENT(intel_gpu_freq_change,
434 TP_PROTO(u32 freq),
435 TP_ARGS(freq),
436
437 TP_STRUCT__entry(
438 __field(u32, freq)
439 ),
440
441 TP_fast_assign(
442 __entry->freq = freq;
443 ),
444
445 TP_printk("new_freq=%u", __entry->freq)
446);
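TRACE_EVENT() expands to a trace_intel_gpu_freq_change() wrapper callable from the driver. A hedged sketch of the intended call site in the RPS code (the exact location and scaling are assumptions):

/* e.g. from gen6_set_rps(), after the new frequency is committed: */
trace_intel_gpu_freq_change(val * GT_FREQUENCY_MULTIPLIER);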
447
437#endif /* _I915_TRACE_H_ */ 448#endif /* _I915_TRACE_H_ */
438 449
439/* This part must be outside protection */ 450/* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index c8f1c0db446d..893f30164b7e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -46,6 +46,7 @@
46struct intel_crt { 46struct intel_crt {
47 struct intel_encoder base; 47 struct intel_encoder base;
48 bool force_hotplug_required; 48 bool force_hotplug_required;
49 u32 adpa_reg;
49}; 50};
50 51
51static struct intel_crt *intel_attached_crt(struct drm_connector *connector) 52static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
@@ -54,42 +55,68 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
54 struct intel_crt, base); 55 struct intel_crt, base);
55} 56}
56 57
57static void pch_crt_dpms(struct drm_encoder *encoder, int mode) 58static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
58{ 59{
59 struct drm_device *dev = encoder->dev; 60 return container_of(encoder, struct intel_crt, base);
61}
62
63static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
64 enum pipe *pipe)
65{
66 struct drm_device *dev = encoder->base.dev;
60 struct drm_i915_private *dev_priv = dev->dev_private; 67 struct drm_i915_private *dev_priv = dev->dev_private;
68 struct intel_crt *crt = intel_encoder_to_crt(encoder);
69 u32 tmp;
70
71 tmp = I915_READ(crt->adpa_reg);
72
73 if (!(tmp & ADPA_DAC_ENABLE))
74 return false;
75
76 if (HAS_PCH_CPT(dev))
77 *pipe = PORT_TO_PIPE_CPT(tmp);
78 else
79 *pipe = PORT_TO_PIPE(tmp);
80
81 return true;
82}
83
84static void intel_disable_crt(struct intel_encoder *encoder)
85{
86 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
87 struct intel_crt *crt = intel_encoder_to_crt(encoder);
61 u32 temp; 88 u32 temp;
62 89
63 temp = I915_READ(PCH_ADPA); 90 temp = I915_READ(crt->adpa_reg);
91 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
64 temp &= ~ADPA_DAC_ENABLE; 92 temp &= ~ADPA_DAC_ENABLE;
93 I915_WRITE(crt->adpa_reg, temp);
94}
65 95
66 switch (mode) { 96static void intel_enable_crt(struct intel_encoder *encoder)
67 case DRM_MODE_DPMS_ON: 97{
68 temp |= ADPA_DAC_ENABLE; 98 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
69 break; 99 struct intel_crt *crt = intel_encoder_to_crt(encoder);
70 case DRM_MODE_DPMS_STANDBY: 100 u32 temp;
71 case DRM_MODE_DPMS_SUSPEND:
72 case DRM_MODE_DPMS_OFF:
73 /* Just leave port enable cleared */
74 break;
75 }
76 101
77 I915_WRITE(PCH_ADPA, temp); 102 temp = I915_READ(crt->adpa_reg);
103 temp |= ADPA_DAC_ENABLE;
104 I915_WRITE(crt->adpa_reg, temp);
78} 105}
79 106
80static void gmch_crt_dpms(struct drm_encoder *encoder, int mode) 107/* Note: The caller is required to filter out dpms modes not supported by the
108 * platform. */
109static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
81{ 110{
82 struct drm_device *dev = encoder->dev; 111 struct drm_device *dev = encoder->base.dev;
83 struct drm_i915_private *dev_priv = dev->dev_private; 112 struct drm_i915_private *dev_priv = dev->dev_private;
113 struct intel_crt *crt = intel_encoder_to_crt(encoder);
84 u32 temp; 114 u32 temp;
85 115
86 temp = I915_READ(ADPA); 116 temp = I915_READ(crt->adpa_reg);
87 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); 117 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
88 temp &= ~ADPA_DAC_ENABLE; 118 temp &= ~ADPA_DAC_ENABLE;
89 119
90 if (IS_VALLEYVIEW(dev) && mode != DRM_MODE_DPMS_ON)
91 mode = DRM_MODE_DPMS_OFF;
92
93 switch (mode) { 120 switch (mode) {
94 case DRM_MODE_DPMS_ON: 121 case DRM_MODE_DPMS_ON:
95 temp |= ADPA_DAC_ENABLE; 122 temp |= ADPA_DAC_ENABLE;
@@ -105,7 +132,51 @@ static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
105 break; 132 break;
106 } 133 }
107 134
108 I915_WRITE(ADPA, temp); 135 I915_WRITE(crt->adpa_reg, temp);
136}
137
138static void intel_crt_dpms(struct drm_connector *connector, int mode)
139{
140 struct drm_device *dev = connector->dev;
141 struct intel_encoder *encoder = intel_attached_encoder(connector);
142 struct drm_crtc *crtc;
143 int old_dpms;
144
145 /* PCH platforms and VLV only support on/off. */
146 if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
147 mode = DRM_MODE_DPMS_OFF;
148
149 if (mode == connector->dpms)
150 return;
151
152 old_dpms = connector->dpms;
153 connector->dpms = mode;
154
155 /* Only need to change hw state when actually enabled */
156 crtc = encoder->base.crtc;
157 if (!crtc) {
158 encoder->connectors_active = false;
159 return;
160 }
161
162 /* We need the pipe to run for anything but OFF. */
163 if (mode == DRM_MODE_DPMS_OFF)
164 encoder->connectors_active = false;
165 else
166 encoder->connectors_active = true;
167
168 if (mode < old_dpms) {
169 /* From off to on, enable the pipe first. */
170 intel_crtc_update_dpms(crtc);
171
172 intel_crt_set_dpms(encoder, mode);
173 } else {
174 intel_crt_set_dpms(encoder, mode);
175
176 intel_crtc_update_dpms(crtc);
177 }
178
179 intel_modeset_check_state(connector->dev);
109} 180}
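The mode < old_dpms comparison works because the DRM DPMS constants are ordered by decreasing power, so a numerically smaller mode means powering up, and the enable/disable ordering flips accordingly:

/* DRM_MODE_DPMS_ON(0) < STANDBY(1) < SUSPEND(2) < OFF(3):
 * powering up   -> enable the crtc/pipe first, then the DAC;
 * powering down -> quiesce the DAC first, then the pipe. */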
110 181
111static int intel_crt_mode_valid(struct drm_connector *connector, 182static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -144,19 +215,15 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
144 215
145 struct drm_device *dev = encoder->dev; 216 struct drm_device *dev = encoder->dev;
146 struct drm_crtc *crtc = encoder->crtc; 217 struct drm_crtc *crtc = encoder->crtc;
218 struct intel_crt *crt =
219 intel_encoder_to_crt(to_intel_encoder(encoder));
147 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 220 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
148 struct drm_i915_private *dev_priv = dev->dev_private; 221 struct drm_i915_private *dev_priv = dev->dev_private;
149 int dpll_md_reg; 222 int dpll_md_reg;
150 u32 adpa, dpll_md; 223 u32 adpa, dpll_md;
151 u32 adpa_reg;
152 224
153 dpll_md_reg = DPLL_MD(intel_crtc->pipe); 225 dpll_md_reg = DPLL_MD(intel_crtc->pipe);
154 226
155 if (HAS_PCH_SPLIT(dev))
156 adpa_reg = PCH_ADPA;
157 else
158 adpa_reg = ADPA;
159
160 /* 227 /*
161 * Disable separate mode multiplier used when cloning SDVO to CRT 228 * Disable separate mode multiplier used when cloning SDVO to CRT
162 * XXX this needs to be adjusted when we really are cloning 229 * XXX this needs to be adjusted when we really are cloning
@@ -184,7 +251,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
184 if (!HAS_PCH_SPLIT(dev)) 251 if (!HAS_PCH_SPLIT(dev))
185 I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); 252 I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
186 253
187 I915_WRITE(adpa_reg, adpa); 254 I915_WRITE(crt->adpa_reg, adpa);
188} 255}
189 256
190static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) 257static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
@@ -544,14 +611,12 @@ intel_crt_detect(struct drm_connector *connector, bool force)
544 return connector->status; 611 return connector->status;
545 612
546 /* for pre-945g platforms use load detect */ 613 /* for pre-945g platforms use load detect */
547 if (intel_get_load_detect_pipe(&crt->base, connector, NULL, 614 if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
548 &tmp)) {
549 if (intel_crt_detect_ddc(connector)) 615 if (intel_crt_detect_ddc(connector))
550 status = connector_status_connected; 616 status = connector_status_connected;
551 else 617 else
552 status = intel_crt_load_detect(crt); 618 status = intel_crt_load_detect(crt);
553 intel_release_load_detect_pipe(&crt->base, connector, 619 intel_release_load_detect_pipe(connector, &tmp);
554 &tmp);
555 } else 620 } else
556 status = connector_status_unknown; 621 status = connector_status_unknown;
557 622
@@ -602,25 +667,15 @@ static void intel_crt_reset(struct drm_connector *connector)
602 * Routines for controlling stuff on the analog port 667 * Routines for controlling stuff on the analog port
603 */ 668 */
604 669
605static const struct drm_encoder_helper_funcs pch_encoder_funcs = { 670static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
606 .mode_fixup = intel_crt_mode_fixup, 671 .mode_fixup = intel_crt_mode_fixup,
607 .prepare = intel_encoder_prepare,
608 .commit = intel_encoder_commit,
609 .mode_set = intel_crt_mode_set, 672 .mode_set = intel_crt_mode_set,
610 .dpms = pch_crt_dpms, 673 .disable = intel_encoder_noop,
611};
612
613static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
614 .mode_fixup = intel_crt_mode_fixup,
615 .prepare = intel_encoder_prepare,
616 .commit = intel_encoder_commit,
617 .mode_set = intel_crt_mode_set,
618 .dpms = gmch_crt_dpms,
619}; 674};
620 675
621static const struct drm_connector_funcs intel_crt_connector_funcs = { 676static const struct drm_connector_funcs intel_crt_connector_funcs = {
622 .reset = intel_crt_reset, 677 .reset = intel_crt_reset,
623 .dpms = drm_helper_connector_dpms, 678 .dpms = intel_crt_dpms,
624 .detect = intel_crt_detect, 679 .detect = intel_crt_detect,
625 .fill_modes = drm_helper_probe_single_connector_modes, 680 .fill_modes = drm_helper_probe_single_connector_modes,
626 .destroy = intel_crt_destroy, 681 .destroy = intel_crt_destroy,
@@ -661,7 +716,6 @@ void intel_crt_init(struct drm_device *dev)
661 struct intel_crt *crt; 716 struct intel_crt *crt;
662 struct intel_connector *intel_connector; 717 struct intel_connector *intel_connector;
663 struct drm_i915_private *dev_priv = dev->dev_private; 718 struct drm_i915_private *dev_priv = dev->dev_private;
664 const struct drm_encoder_helper_funcs *encoder_helper_funcs;
665 719
666 /* Skip machines without VGA that falsely report hotplug events */ 720 /* Skip machines without VGA that falsely report hotplug events */
667 if (dmi_check_system(intel_no_crt)) 721 if (dmi_check_system(intel_no_crt))
@@ -687,13 +741,11 @@ void intel_crt_init(struct drm_device *dev)
687 intel_connector_attach_encoder(intel_connector, &crt->base); 741 intel_connector_attach_encoder(intel_connector, &crt->base);
688 742
689 crt->base.type = INTEL_OUTPUT_ANALOG; 743 crt->base.type = INTEL_OUTPUT_ANALOG;
690 crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT | 744 crt->base.cloneable = true;
691 1 << INTEL_ANALOG_CLONE_BIT |
692 1 << INTEL_SDVO_LVDS_CLONE_BIT);
693 if (IS_HASWELL(dev)) 745 if (IS_HASWELL(dev))
694 crt->base.crtc_mask = (1 << 0); 746 crt->base.crtc_mask = (1 << 0);
695 else 747 else
696 crt->base.crtc_mask = (1 << 0) | (1 << 1); 748 crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
697 749
698 if (IS_GEN2(dev)) 750 if (IS_GEN2(dev))
699 connector->interlace_allowed = 0; 751 connector->interlace_allowed = 0;
@@ -702,11 +754,18 @@ void intel_crt_init(struct drm_device *dev)
702 connector->doublescan_allowed = 0; 754 connector->doublescan_allowed = 0;
703 755
704 if (HAS_PCH_SPLIT(dev)) 756 if (HAS_PCH_SPLIT(dev))
705 encoder_helper_funcs = &pch_encoder_funcs; 757 crt->adpa_reg = PCH_ADPA;
758 else if (IS_VALLEYVIEW(dev))
759 crt->adpa_reg = VLV_ADPA;
706 else 760 else
707 encoder_helper_funcs = &gmch_encoder_funcs; 761 crt->adpa_reg = ADPA;
762
763 crt->base.disable = intel_disable_crt;
764 crt->base.enable = intel_enable_crt;
765 crt->base.get_hw_state = intel_crt_get_hw_state;
766 intel_connector->get_hw_state = intel_connector_get_hw_state;
708 767
709 drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs); 768 drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
710 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 769 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
711 770
712 drm_sysfs_connector_add(connector); 771 drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 933c74859172..bfe375466a0e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -250,7 +250,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
250 case PORT_B: 250 case PORT_B:
251 case PORT_C: 251 case PORT_C:
252 case PORT_D: 252 case PORT_D:
253 intel_hdmi_init(dev, DDI_BUF_CTL(port)); 253 intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
254 break; 254 break;
255 default: 255 default:
256 DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n", 256 DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
@@ -267,7 +267,8 @@ struct wrpll_tmds_clock {
267 u16 r2; /* Reference divider */ 267 u16 r2; /* Reference divider */
268}; 268};
269 269
270/* Table of matching values for WRPLL clocks programming for each frequency */ 270/* Table of matching values for WRPLL clocks programming for each frequency.
271 * The code assumes this table is sorted. */
271static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = { 272static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
272 {19750, 38, 25, 18}, 273 {19750, 38, 25, 18},
273 {20000, 48, 32, 18}, 274 {20000, 48, 32, 18},
@@ -277,7 +278,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
277 {23000, 36, 23, 15}, 278 {23000, 36, 23, 15},
278 {23500, 40, 40, 23}, 279 {23500, 40, 40, 23},
279 {23750, 26, 16, 14}, 280 {23750, 26, 16, 14},
280 {23750, 26, 16, 14},
281 {24000, 36, 24, 15}, 281 {24000, 36, 24, 15},
282 {25000, 36, 25, 15}, 282 {25000, 36, 25, 15},
283 {25175, 26, 40, 33}, 283 {25175, 26, 40, 33},
@@ -437,7 +437,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
437 {108000, 8, 24, 15}, 437 {108000, 8, 24, 15},
438 {108108, 8, 173, 108}, 438 {108108, 8, 173, 108},
439 {109000, 6, 23, 19}, 439 {109000, 6, 23, 19},
440 {109000, 6, 23, 19},
441 {110000, 6, 22, 18}, 440 {110000, 6, 22, 18},
442 {110013, 6, 22, 18}, 441 {110013, 6, 22, 18},
443 {110250, 8, 49, 30}, 442 {110250, 8, 49, 30},
@@ -614,7 +613,6 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
614 {218250, 4, 42, 26}, 613 {218250, 4, 42, 26},
615 {218750, 4, 34, 21}, 614 {218750, 4, 34, 21},
616 {219000, 4, 47, 29}, 615 {219000, 4, 47, 29},
617 {219000, 4, 47, 29},
618 {220000, 4, 44, 27}, 616 {220000, 4, 44, 27},
619 {220640, 4, 49, 30}, 617 {220640, 4, 49, 30},
620 {220750, 4, 36, 22}, 618 {220750, 4, 36, 22},
@@ -658,7 +656,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
658 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 656 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
659 int port = intel_hdmi->ddi_port; 657 int port = intel_hdmi->ddi_port;
660 int pipe = intel_crtc->pipe; 658 int pipe = intel_crtc->pipe;
661 int p, n2, r2, valid=0; 659 int p, n2, r2;
662 u32 temp, i; 660 u32 temp, i;
663 661
664 /* On Haswell, we need to enable the clocks and prepare DDI function to 662 /* On Haswell, we need to enable the clocks and prepare DDI function to
@@ -666,26 +664,23 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
666 */ 664 */
667 DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); 665 DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
668 666
669 for (i=0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) { 667 for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
670 if (crtc->mode.clock == wrpll_tmds_clock_table[i].clock) { 668 if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
671 p = wrpll_tmds_clock_table[i].p; 669 break;
672 n2 = wrpll_tmds_clock_table[i].n2;
673 r2 = wrpll_tmds_clock_table[i].r2;
674 670
675 DRM_DEBUG_KMS("WR PLL clock: found settings for %dKHz refresh rate: p=%d, n2=%d, r2=%d\n", 671 if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
676 crtc->mode.clock, 672 i--;
677 p, n2, r2);
678 673
679 valid = 1; 674 p = wrpll_tmds_clock_table[i].p;
680 break; 675 n2 = wrpll_tmds_clock_table[i].n2;
681 } 676 r2 = wrpll_tmds_clock_table[i].r2;
682 }
683 677
684 if (!valid) { 678 if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
685 DRM_ERROR("Unable to find WR PLL clock settings for %dKHz refresh rate\n", 679 DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
686 crtc->mode.clock); 680 wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
687 return; 681
688 } 682 DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
683 crtc->mode.clock, p, n2, r2);
689 684
690 /* Enable LCPLL if disabled */ 685 /* Enable LCPLL if disabled */
691 temp = I915_READ(LCPLL_CTL); 686 temp = I915_READ(LCPLL_CTL);
@@ -718,46 +713,107 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
718 /* Proper support for digital audio needs a new logic and a new set 713 /* Proper support for digital audio needs a new logic and a new set
719 * of registers, so we leave it for future patch bombing. 714 * of registers, so we leave it for future patch bombing.
720 */ 715 */
721 DRM_DEBUG_DRIVER("HDMI audio on pipe %c not yet supported on DDI\n", 716 DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
722 pipe_name(intel_crtc->pipe)); 717 pipe_name(intel_crtc->pipe));
718
719 /* write eld */
720 DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
721 intel_write_eld(encoder, adjusted_mode);
723 } 722 }
724 723
725 /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ 724 /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
726 temp = I915_READ(DDI_FUNC_CTL(pipe)); 725 temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
727 temp &= ~PIPE_DDI_PORT_MASK; 726
728 temp &= ~PIPE_DDI_BPC_12; 727 switch (intel_crtc->bpp) {
729 temp |= PIPE_DDI_SELECT_PORT(port) | 728 case 18:
730 PIPE_DDI_MODE_SELECT_HDMI | 729 temp |= PIPE_DDI_BPC_6;
731 ((intel_crtc->bpp > 24) ? 730 break;
732 PIPE_DDI_BPC_12 : 731 case 24:
733 PIPE_DDI_BPC_8) | 732 temp |= PIPE_DDI_BPC_8;
734 PIPE_DDI_FUNC_ENABLE; 733 break;
734 case 30:
735 temp |= PIPE_DDI_BPC_10;
736 break;
737 case 36:
738 temp |= PIPE_DDI_BPC_12;
739 break;
740 default:
741 WARN(1, "%d bpp unsupported by pipe DDI function\n",
742 intel_crtc->bpp);
743 }
744
745 if (intel_hdmi->has_hdmi_sink)
746 temp |= PIPE_DDI_MODE_SELECT_HDMI;
747 else
748 temp |= PIPE_DDI_MODE_SELECT_DVI;
749
750 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
751 temp |= PIPE_DDI_PVSYNC;
752 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
753 temp |= PIPE_DDI_PHSYNC;
735 754
736 I915_WRITE(DDI_FUNC_CTL(pipe), temp); 755 I915_WRITE(DDI_FUNC_CTL(pipe), temp);
737 756
738 intel_hdmi->set_infoframes(encoder, adjusted_mode); 757 intel_hdmi->set_infoframes(encoder, adjusted_mode);
739} 758}
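Putting the bits together, a worked example of the value composed for a 24 bpp HDMI sink on port B with positive syncs (field names from the i915_reg.h hunk earlier; the numeric result is my own arithmetic, shown for illustration):

temp = PIPE_DDI_FUNC_ENABLE		/* 1 << 31 */
     | PIPE_DDI_SELECT_PORT(PORT_B)	/* 1 << 28 (PORT_B == 1) */
     | PIPE_DDI_MODE_SELECT_HDMI	/* 0 << 24 */
     | PIPE_DDI_BPC_8			/* 0 << 20 */
     | PIPE_DDI_PVSYNC			/* 1 << 17 */
     | PIPE_DDI_PHSYNC;			/* 1 << 16 */
/* = 0x90030000, written to DDI_FUNC_CTL(pipe) */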
740 759
741void intel_ddi_dpms(struct drm_encoder *encoder, int mode) 760bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
761 enum pipe *pipe)
742{ 762{
743 struct drm_device *dev = encoder->dev; 763 struct drm_device *dev = encoder->base.dev;
744 struct drm_i915_private *dev_priv = dev->dev_private; 764 struct drm_i915_private *dev_priv = dev->dev_private;
745 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 765 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
766 u32 tmp;
767 int i;
768
769 tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
770
771 if (!(tmp & DDI_BUF_CTL_ENABLE))
772 return false;
773
774 for_each_pipe(i) {
775 tmp = I915_READ(DDI_FUNC_CTL(i));
776
777 if ((tmp & PIPE_DDI_PORT_MASK)
778 == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
779 *pipe = i;
780 return true;
781 }
782 }
783
784 DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
785
786 return true;
787}
788
789void intel_enable_ddi(struct intel_encoder *encoder)
790{
791 struct drm_device *dev = encoder->base.dev;
792 struct drm_i915_private *dev_priv = dev->dev_private;
793 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
746 int port = intel_hdmi->ddi_port; 794 int port = intel_hdmi->ddi_port;
747 u32 temp; 795 u32 temp;
748 796
749 temp = I915_READ(DDI_BUF_CTL(port)); 797 temp = I915_READ(DDI_BUF_CTL(port));
750 798 temp |= DDI_BUF_CTL_ENABLE;
751 if (mode != DRM_MODE_DPMS_ON) {
752 temp &= ~DDI_BUF_CTL_ENABLE;
753 } else {
754 temp |= DDI_BUF_CTL_ENABLE;
755 }
756 799
757 /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width, 800 /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
758 * and swing/emphasis values are ignored so nothing special needs 801 * and swing/emphasis values are ignored so nothing special needs
759 * to be done besides enabling the port. 802 * to be done besides enabling the port.
760 */ 803 */
761 I915_WRITE(DDI_BUF_CTL(port), 804 I915_WRITE(DDI_BUF_CTL(port), temp);
762 temp); 805}
806
807void intel_disable_ddi(struct intel_encoder *encoder)
808{
809 struct drm_device *dev = encoder->base.dev;
810 struct drm_i915_private *dev_priv = dev->dev_private;
811 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
812 int port = intel_hdmi->ddi_port;
813 u32 temp;
814
815 temp = I915_READ(DDI_BUF_CTL(port));
816 temp &= ~DDI_BUF_CTL_ENABLE;
817
818 I915_WRITE(DDI_BUF_CTL(port), temp);
763} 819}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7ea9a3ceb269..e3c02655d36f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1006,7 +1006,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1006 /* Wait for the Pipe State to go off */ 1006 /* Wait for the Pipe State to go off */
1007 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 1007 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1008 100)) 1008 100))
1009 DRM_DEBUG_KMS("pipe_off wait timed out\n"); 1009 WARN(1, "pipe_off wait timed out\n");
1010 } else { 1010 } else {
1011 u32 last_line, line_mask; 1011 u32 last_line, line_mask;
1012 int reg = PIPEDSL(pipe); 1012 int reg = PIPEDSL(pipe);
@@ -1024,7 +1024,7 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1024 } while (((I915_READ(reg) & line_mask) != last_line) && 1024 } while (((I915_READ(reg) & line_mask) != last_line) &&
1025 time_after(timeout, jiffies)); 1025 time_after(timeout, jiffies));
1026 if (time_after(jiffies, timeout)) 1026 if (time_after(jiffies, timeout))
1027 DRM_DEBUG_KMS("pipe_off wait timed out\n"); 1027 WARN(1, "pipe_off wait timed out\n");
1028 } 1028 }
1029} 1029}
1030 1030
@@ -1431,6 +1431,8 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1431 * protect mechanism may be enabled. 1431 * protect mechanism may be enabled.
1432 * 1432 *
1433 * Note! This is for pre-ILK only. 1433 * Note! This is for pre-ILK only.
1434 *
1435 * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
1434 */ 1436 */
1435static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 1437static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1436{ 1438{
@@ -1860,59 +1862,6 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv,
1860 intel_wait_for_vblank(dev_priv->dev, pipe); 1862 intel_wait_for_vblank(dev_priv->dev, pipe);
1861} 1863}
1862 1864
1863static void disable_pch_dp(struct drm_i915_private *dev_priv,
1864 enum pipe pipe, int reg, u32 port_sel)
1865{
1866 u32 val = I915_READ(reg);
1867 if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1868 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1869 I915_WRITE(reg, val & ~DP_PORT_EN);
1870 }
1871}
1872
1873static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1874 enum pipe pipe, int reg)
1875{
1876 u32 val = I915_READ(reg);
1877 if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
1878 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1879 reg, pipe);
1880 I915_WRITE(reg, val & ~PORT_ENABLE);
1881 }
1882}
1883
1884/* Disable any ports connected to this transcoder */
1885static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1886 enum pipe pipe)
1887{
1888 u32 reg, val;
1889
1890 val = I915_READ(PCH_PP_CONTROL);
1891 I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1892
1893 disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1894 disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1895 disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1896
1897 reg = PCH_ADPA;
1898 val = I915_READ(reg);
1899 if (adpa_pipe_enabled(dev_priv, pipe, val))
1900 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1901
1902 reg = PCH_LVDS;
1903 val = I915_READ(reg);
1904 if (lvds_pipe_enabled(dev_priv, pipe, val)) {
1905 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1906 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1907 POSTING_READ(reg);
1908 udelay(100);
1909 }
1910
1911 disable_pch_hdmi(dev_priv, pipe, HDMIB);
1912 disable_pch_hdmi(dev_priv, pipe, HDMIC);
1913 disable_pch_hdmi(dev_priv, pipe, HDMID);
1914}
1915
1916int 1865int
1917intel_pin_and_fence_fb_obj(struct drm_device *dev, 1866intel_pin_and_fence_fb_obj(struct drm_device *dev,
1918 struct drm_i915_gem_object *obj, 1867 struct drm_i915_gem_object *obj,
@@ -2201,16 +2150,17 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
2201 2150
2202static int 2151static int
2203intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 2152intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2204 struct drm_framebuffer *old_fb) 2153 struct drm_framebuffer *fb)
2205{ 2154{
2206 struct drm_device *dev = crtc->dev; 2155 struct drm_device *dev = crtc->dev;
2207 struct drm_i915_private *dev_priv = dev->dev_private; 2156 struct drm_i915_private *dev_priv = dev->dev_private;
2208 struct drm_i915_master_private *master_priv; 2157 struct drm_i915_master_private *master_priv;
2209 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2158 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2159 struct drm_framebuffer *old_fb;
2210 int ret; 2160 int ret;
2211 2161
2212 /* no fb bound */ 2162 /* no fb bound */
2213 if (!crtc->fb) { 2163 if (!fb) {
2214 DRM_ERROR("No FB bound\n"); 2164 DRM_ERROR("No FB bound\n");
2215 return 0; 2165 return 0;
2216 } 2166 }
@@ -2224,7 +2174,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2224 2174
2225 mutex_lock(&dev->struct_mutex); 2175 mutex_lock(&dev->struct_mutex);
2226 ret = intel_pin_and_fence_fb_obj(dev, 2176 ret = intel_pin_and_fence_fb_obj(dev,
2227 to_intel_framebuffer(crtc->fb)->obj, 2177 to_intel_framebuffer(fb)->obj,
2228 NULL); 2178 NULL);
2229 if (ret != 0) { 2179 if (ret != 0) {
2230 mutex_unlock(&dev->struct_mutex); 2180 mutex_unlock(&dev->struct_mutex);
@@ -2232,17 +2182,22 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2232 return ret; 2182 return ret;
2233 } 2183 }
2234 2184
2235 if (old_fb) 2185 if (crtc->fb)
2236 intel_finish_fb(old_fb); 2186 intel_finish_fb(crtc->fb);
2237 2187
2238 ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y); 2188 ret = dev_priv->display.update_plane(crtc, fb, x, y);
2239 if (ret) { 2189 if (ret) {
2240 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj); 2190 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
2241 mutex_unlock(&dev->struct_mutex); 2191 mutex_unlock(&dev->struct_mutex);
2242 DRM_ERROR("failed to update base address\n"); 2192 DRM_ERROR("failed to update base address\n");
2243 return ret; 2193 return ret;
2244 } 2194 }
2245 2195
2196 old_fb = crtc->fb;
2197 crtc->fb = fb;
2198 crtc->x = x;
2199 crtc->y = y;
2200
2246 if (old_fb) { 2201 if (old_fb) {
2247 intel_wait_for_vblank(dev, intel_crtc->pipe); 2202 intel_wait_for_vblank(dev, intel_crtc->pipe);
2248 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); 2203 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
@@ -2709,11 +2664,10 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2709 DRM_DEBUG_KMS("FDI train done.\n"); 2664 DRM_DEBUG_KMS("FDI train done.\n");
2710} 2665}
2711 2666
2712static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) 2667static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2713{ 2668{
2714 struct drm_device *dev = crtc->dev; 2669 struct drm_device *dev = intel_crtc->base.dev;
2715 struct drm_i915_private *dev_priv = dev->dev_private; 2670 struct drm_i915_private *dev_priv = dev->dev_private;
2716 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2717 int pipe = intel_crtc->pipe; 2671 int pipe = intel_crtc->pipe;
2718 u32 reg, temp; 2672 u32 reg, temp;
2719 2673
@@ -2754,6 +2708,35 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2754 } 2708 }
2755} 2709}
2756 2710
2711static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
2712{
2713 struct drm_device *dev = intel_crtc->base.dev;
2714 struct drm_i915_private *dev_priv = dev->dev_private;
2715 int pipe = intel_crtc->pipe;
2716 u32 reg, temp;
2717
2718 /* Switch from PCDclk to Rawclk */
2719 reg = FDI_RX_CTL(pipe);
2720 temp = I915_READ(reg);
2721 I915_WRITE(reg, temp & ~FDI_PCDCLK);
2722
2723 /* Disable CPU FDI TX PLL */
2724 reg = FDI_TX_CTL(pipe);
2725 temp = I915_READ(reg);
2726 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2727
2728 POSTING_READ(reg);
2729 udelay(100);
2730
2731 reg = FDI_RX_CTL(pipe);
2732 temp = I915_READ(reg);
2733 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2734
2735 /* Wait for the clocks to turn off. */
2736 POSTING_READ(reg);
2737 udelay(100);
2738}
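The POSTING_READ() + udelay() pairs above follow the usual MMIO idiom: the read forces the preceding posted write out to the device before the fixed settle delay starts counting. The pattern in isolation, as a sketch:

I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);	/* request PLL off */
POSTING_READ(reg);	/* flush the posted write to the hardware */
udelay(100);		/* then give the clocks time to wind down */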
2739
2757static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) 2740static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2758{ 2741{
2759 struct drm_i915_private *dev_priv = dev->dev_private; 2742 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2838,13 +2821,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2838static bool intel_crtc_driving_pch(struct drm_crtc *crtc) 2821static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2839{ 2822{
2840 struct drm_device *dev = crtc->dev; 2823 struct drm_device *dev = crtc->dev;
2841 struct intel_encoder *encoder; 2824 struct intel_encoder *intel_encoder;
2842 2825
2843 /* 2826 /*
2844 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that 2827 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2845 * must be driven by its own crtc; no sharing is possible. 2828 * must be driven by its own crtc; no sharing is possible.
2846 */ 2829 */
2847 for_each_encoder_on_crtc(dev, crtc, encoder) { 2830 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2848 2831
2849 /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell 2832 /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
2850 * CPU handles all others */ 2833 * CPU handles all others */
@@ -2852,19 +2835,19 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2852 /* It is still unclear how this will work on PPT, so throw up a warning */ 2835 /* It is still unclear how this will work on PPT, so throw up a warning */
2853 WARN_ON(!HAS_PCH_LPT(dev)); 2836 WARN_ON(!HAS_PCH_LPT(dev));
2854 2837
2855 if (encoder->type == DRM_MODE_ENCODER_DAC) { 2838 if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
2856 DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n"); 2839 DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
2857 return true; 2840 return true;
2858 } else { 2841 } else {
2859 DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n", 2842 DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
2860 encoder->type); 2843 intel_encoder->type);
2861 return false; 2844 return false;
2862 } 2845 }
2863 } 2846 }
2864 2847
2865 switch (encoder->type) { 2848 switch (intel_encoder->type) {
2866 case INTEL_OUTPUT_EDP: 2849 case INTEL_OUTPUT_EDP:
2867 if (!intel_encoder_is_pch_edp(&encoder->base)) 2850 if (!intel_encoder_is_pch_edp(&intel_encoder->base))
2868 return false; 2851 return false;
2869 continue; 2852 continue;
2870 } 2853 }
@@ -3181,11 +3164,14 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3181 struct drm_device *dev = crtc->dev; 3164 struct drm_device *dev = crtc->dev;
3182 struct drm_i915_private *dev_priv = dev->dev_private; 3165 struct drm_i915_private *dev_priv = dev->dev_private;
3183 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3166 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3167 struct intel_encoder *encoder;
3184 int pipe = intel_crtc->pipe; 3168 int pipe = intel_crtc->pipe;
3185 int plane = intel_crtc->plane; 3169 int plane = intel_crtc->plane;
3186 u32 temp; 3170 u32 temp;
3187 bool is_pch_port; 3171 bool is_pch_port;
3188 3172
3173 WARN_ON(!crtc->enabled);
3174
3189 if (intel_crtc->active) 3175 if (intel_crtc->active)
3190 return; 3176 return;
3191 3177
@@ -3200,10 +3186,16 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3200 3186
3201 is_pch_port = intel_crtc_driving_pch(crtc); 3187 is_pch_port = intel_crtc_driving_pch(crtc);
3202 3188
3203 if (is_pch_port) 3189 if (is_pch_port) {
3204 ironlake_fdi_pll_enable(crtc); 3190 ironlake_fdi_pll_enable(intel_crtc);
3205 else 3191 } else {
3206 ironlake_fdi_disable(crtc); 3192 assert_fdi_tx_disabled(dev_priv, pipe);
3193 assert_fdi_rx_disabled(dev_priv, pipe);
3194 }
3195
3196 for_each_encoder_on_crtc(dev, crtc, encoder)
3197 if (encoder->pre_enable)
3198 encoder->pre_enable(encoder);
3207 3199
3208 /* Enable panel fitting for LVDS */ 3200 /* Enable panel fitting for LVDS */
3209 if (dev_priv->pch_pf_size && 3201 if (dev_priv->pch_pf_size &&
@@ -3234,6 +3226,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3234 mutex_unlock(&dev->struct_mutex); 3226 mutex_unlock(&dev->struct_mutex);
3235 3227
3236 intel_crtc_update_cursor(crtc, true); 3228 intel_crtc_update_cursor(crtc, true);
3229
3230 for_each_encoder_on_crtc(dev, crtc, encoder)
3231 encoder->enable(encoder);
3232
3233 if (HAS_PCH_CPT(dev))
3234 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3237} 3235}
3238 3236
3239static void ironlake_crtc_disable(struct drm_crtc *crtc) 3237static void ironlake_crtc_disable(struct drm_crtc *crtc)
@@ -3241,13 +3239,18 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3241 struct drm_device *dev = crtc->dev; 3239 struct drm_device *dev = crtc->dev;
3242 struct drm_i915_private *dev_priv = dev->dev_private; 3240 struct drm_i915_private *dev_priv = dev->dev_private;
3243 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3241 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3242 struct intel_encoder *encoder;
3244 int pipe = intel_crtc->pipe; 3243 int pipe = intel_crtc->pipe;
3245 int plane = intel_crtc->plane; 3244 int plane = intel_crtc->plane;
3246 u32 reg, temp; 3245 u32 reg, temp;
3247 3246
3247
3248 if (!intel_crtc->active) 3248 if (!intel_crtc->active)
3249 return; 3249 return;
3250 3250
3251 for_each_encoder_on_crtc(dev, crtc, encoder)
3252 encoder->disable(encoder);
3253
3251 intel_crtc_wait_for_pending_flips(crtc); 3254 intel_crtc_wait_for_pending_flips(crtc);
3252 drm_vblank_off(dev, pipe); 3255 drm_vblank_off(dev, pipe);
3253 intel_crtc_update_cursor(crtc, false); 3256 intel_crtc_update_cursor(crtc, false);
@@ -3263,14 +3266,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	I915_WRITE(PF_CTL(pipe), 0);
 	I915_WRITE(PF_WIN_SZ(pipe), 0);
 
-	ironlake_fdi_disable(crtc);
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->post_disable)
+			encoder->post_disable(encoder);
 
-	/* This is a horrible layering violation; we should be doing this in
-	 * the connector/encoder ->prepare instead, but we don't always have
-	 * enough information there about the config to know whether it will
-	 * actually be necessary or just cause undesired flicker.
-	 */
-	intel_disable_pch_ports(dev_priv, pipe);
+	ironlake_fdi_disable(crtc);
 
 	intel_disable_transcoder(dev_priv, pipe);
 
@@ -3304,26 +3304,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	/* disable PCH DPLL */
 	intel_disable_pch_pll(intel_crtc);
 
-	/* Switch from PCDclk to Rawclk */
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~FDI_PCDCLK);
-
-	/* Disable CPU FDI TX PLL */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
-
-	POSTING_READ(reg);
-	udelay(100);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
-
-	/* Wait for the clocks to turn off. */
-	POSTING_READ(reg);
-	udelay(100);
+	ironlake_fdi_pll_disable(intel_crtc);
 
 	intel_crtc->active = false;
 	intel_update_watermarks(dev);
@@ -3333,30 +3314,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
-
-	/* XXX: When our outputs are all unaware of DPMS modes other than off
-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
-	 */
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
-		ironlake_crtc_enable(crtc);
-		break;
-
-	case DRM_MODE_DPMS_OFF:
-		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
-		ironlake_crtc_disable(crtc);
-		break;
-	}
-}
-
 static void ironlake_crtc_off(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -3386,9 +3343,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 
+	WARN_ON(!crtc->enabled);
+
 	if (intel_crtc->active)
 		return;
 
@@ -3405,6 +3365,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 	/* Give the overlay scaler a chance to enable if it's on this pipe */
 	intel_crtc_dpms_overlay(intel_crtc, true);
 	intel_crtc_update_cursor(crtc, true);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->enable(encoder);
 }
 
 static void i9xx_crtc_disable(struct drm_crtc *crtc)
@@ -3412,12 +3375,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
 
+
 	if (!intel_crtc->active)
 		return;
 
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->disable(encoder);
+
 	/* Give the overlay scaler a chance to disable if it's on this pipe */
 	intel_crtc_wait_for_pending_flips(crtc);
 	drm_vblank_off(dev, pipe);
@@ -3436,45 +3404,17 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	intel_update_watermarks(dev);
 }
 
-static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-	/* XXX: When our outputs are all unaware of DPMS modes other than off
-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
-	 */
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-		i9xx_crtc_enable(crtc);
-		break;
-	case DRM_MODE_DPMS_OFF:
-		i9xx_crtc_disable(crtc);
-		break;
-	}
-}
-
 static void i9xx_crtc_off(struct drm_crtc *crtc)
 {
 }
 
-/**
- * Sets the power management mode of the pipe and plane.
- */
-static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void intel_crtc_update_sarea(struct drm_crtc *crtc,
+				    bool enabled)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	bool enabled;
-
-	if (intel_crtc->dpms_mode == mode)
-		return;
-
-	intel_crtc->dpms_mode = mode;
-
-	dev_priv->display.dpms(crtc, mode);
 
 	if (!dev->primary->master)
 		return;
@@ -3483,8 +3423,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 	if (!master_priv->sarea_priv)
 		return;
 
-	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
-
 	switch (pipe) {
 	case 0:
 		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
@@ -3500,13 +3438,42 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
 	}
 }
 
+/**
+ * Sets the power management mode of the pipe and plane.
+ */
+void intel_crtc_update_dpms(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	bool enable = false;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
+		enable |= intel_encoder->connectors_active;
+
+	if (enable)
+		dev_priv->display.crtc_enable(crtc);
+	else
+		dev_priv->display.crtc_disable(crtc);
+
+	intel_crtc_update_sarea(crtc, enable);
+}
+
+static void intel_crtc_noop(struct drm_crtc *crtc)
+{
+}
+
 static void intel_crtc_disable(struct drm_crtc *crtc)
 {
-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_device *dev = crtc->dev;
+	struct drm_connector *connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+	/* crtc should still be enabled when we disable it. */
+	WARN_ON(!crtc->enabled);
+
+	dev_priv->display.crtc_disable(crtc);
+	intel_crtc_update_sarea(crtc, false);
 	dev_priv->display.off(crtc);
 
 	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
@@ -3516,63 +3483,128 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
 		mutex_lock(&dev->struct_mutex);
 		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
 		mutex_unlock(&dev->struct_mutex);
+		crtc->fb = NULL;
+	}
+
+	/* Update computed state. */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (!connector->encoder || !connector->encoder->crtc)
+			continue;
+
+		if (connector->encoder->crtc != crtc)
+			continue;
+
+		connector->dpms = DRM_MODE_DPMS_OFF;
+		to_intel_encoder(connector->encoder)->connectors_active = false;
 	}
 }
 
-/* Prepare for a mode set.
- *
- * Note we could be a lot smarter here.  We need to figure out which outputs
- * will be enabled, which disabled (in short, how the config will changes)
- * and perform the minimum necessary steps to accomplish that, e.g. updating
- * watermarks, FBC configuration, making sure PLLs are programmed correctly,
- * panel fitting is in the proper state, etc.
- */
-static void i9xx_crtc_prepare(struct drm_crtc *crtc)
+void intel_modeset_disable(struct drm_device *dev)
 {
-	i9xx_crtc_disable(crtc);
+	struct drm_crtc *crtc;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		if (crtc->enabled)
+			intel_crtc_disable(crtc);
+	}
 }
 
-static void i9xx_crtc_commit(struct drm_crtc *crtc)
+void intel_encoder_noop(struct drm_encoder *encoder)
 {
-	i9xx_crtc_enable(crtc);
 }
 
-static void ironlake_crtc_prepare(struct drm_crtc *crtc)
+void intel_encoder_destroy(struct drm_encoder *encoder)
 {
-	ironlake_crtc_disable(crtc);
+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+	drm_encoder_cleanup(encoder);
+	kfree(intel_encoder);
 }
 
-static void ironlake_crtc_commit(struct drm_crtc *crtc)
+/* Simple dpms helper for encoders with just one connector, no cloning and only
+ * one kind of off state. It clamps all !ON modes to fully OFF and changes the
+ * state of the entire output pipe. */
+void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
 {
-	ironlake_crtc_enable(crtc);
+	if (mode == DRM_MODE_DPMS_ON) {
+		encoder->connectors_active = true;
+
+		intel_crtc_update_dpms(encoder->base.crtc);
+	} else {
+		encoder->connectors_active = false;
+
+		intel_crtc_update_dpms(encoder->base.crtc);
+	}
 }
 
-void intel_encoder_prepare(struct drm_encoder *encoder)
+/* Cross check the actual hw state with our own modeset state tracking (and its
+ * internal consistency). */
+static void intel_connector_check_state(struct intel_connector *connector)
 {
-	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-	/* lvds has its own version of prepare see intel_lvds_prepare */
-	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+	if (connector->get_hw_state(connector)) {
+		struct intel_encoder *encoder = connector->encoder;
+		struct drm_crtc *crtc;
+		bool encoder_enabled;
+		enum pipe pipe;
+
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+			      connector->base.base.id,
+			      drm_get_connector_name(&connector->base));
+
+		WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+		     "wrong connector dpms state\n");
+		WARN(connector->base.encoder != &encoder->base,
+		     "active connector not linked to encoder\n");
+		WARN(!encoder->connectors_active,
+		     "encoder->connectors_active not set\n");
+
+		encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+		WARN(!encoder_enabled, "encoder not enabled\n");
+		if (WARN_ON(!encoder->base.crtc))
+			return;
+
+		crtc = encoder->base.crtc;
+
+		WARN(!crtc->enabled, "crtc not enabled\n");
+		WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+		WARN(pipe != to_intel_crtc(crtc)->pipe,
+		     "encoder active on the wrong pipe\n");
+	}
 }
 
-void intel_encoder_commit(struct drm_encoder *encoder)
+/* Even simpler default implementation, if there's really no special case to
+ * consider. */
+void intel_connector_dpms(struct drm_connector *connector, int mode)
 {
-	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-	struct drm_device *dev = encoder->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_encoder *encoder = intel_attached_encoder(connector);
 
-	/* lvds has its own version of commit see intel_lvds_commit */
-	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+	/* All the simple cases only support two dpms states. */
+	if (mode != DRM_MODE_DPMS_ON)
+		mode = DRM_MODE_DPMS_OFF;
 
-	if (HAS_PCH_CPT(dev))
-		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
+	if (mode == connector->dpms)
+		return;
+
+	connector->dpms = mode;
+
+	/* Only need to change hw state when actually enabled */
+	if (encoder->base.crtc)
+		intel_encoder_dpms(encoder, mode);
+	else
+		WARN_ON(encoder->connectors_active != false);
+
+	intel_modeset_check_state(connector->dev);
 }
 
-void intel_encoder_destroy(struct drm_encoder *encoder)
+/* Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning and hence the encoder state determines the state
+ * of the connector. */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+	enum pipe pipe = 0;
+	struct intel_encoder *encoder = connector->encoder;
 
-	drm_encoder_cleanup(encoder);
-	kfree(intel_encoder);
+	return encoder->get_hw_state(encoder, &pipe);
 }
 
 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
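Note on the hunk above: the old per-crtc dpms_mode field is replaced by one flag per encoder, connectors_active, and intel_crtc_update_dpms() simply ORs those flags to decide whether the pipe should run. A stand-alone, compilable C model of just that aggregation; all types and names here are illustrative, not the driver's own:

#include <stdbool.h>
#include <stdio.h>

struct model_encoder {
	bool connectors_active;
};

/* Mirrors the loop in intel_crtc_update_dpms(): OR the encoder states. */
static bool crtc_should_be_enabled(const struct model_encoder *encoders,
				   int num_encoders)
{
	bool enable = false;
	int i;

	for (i = 0; i < num_encoders; i++)
		enable |= encoders[i].connectors_active;

	return enable;
}

int main(void)
{
	struct model_encoder encoders[2] = { { false }, { true } };

	/* one connector still active -> crtc stays on */
	printf("enable crtc: %d\n", crtc_should_be_enabled(encoders, 2));

	encoders[1].connectors_active = false;	/* last connector dpms'd off */
	printf("enable crtc: %d\n", crtc_should_be_enabled(encoders, 2));
	return 0;
}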
@@ -3593,6 +3625,13 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
 	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
 		drm_mode_set_crtcinfo(adjusted_mode, 0);
 
+	/* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
+	 * with a hsync front porch of 0.
+	 */
+	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
+	    adjusted_mode->hsync_start == adjusted_mode->hdisplay)
+		return false;
+
 	return true;
 }
 
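The new WaPruneModeWithIncorrectHsyncOffset check rejects any mode whose horizontal front porch (hsync_start minus hdisplay) is zero, on gen > 4 or G4X parts. A minimal sketch of the same test, using only the two drm_display_mode fields involved:

#include <stdbool.h>
#include <stdio.h>

struct mode {
	int hdisplay;
	int hsync_start;
};

static bool hsync_front_porch_ok(const struct mode *m)
{
	/* a front porch of 0 means hsync starts on the last active pixel */
	return m->hsync_start != m->hdisplay;
}

int main(void)
{
	struct mode good = { .hdisplay = 1920, .hsync_start = 2008 };
	struct mode bad  = { .hdisplay = 1920, .hsync_start = 1920 };

	printf("good mode ok: %d\n", hsync_front_porch_ok(&good));	/* 1 */
	printf("bad mode ok:  %d\n", hsync_front_porch_ok(&bad));	/* 0 */
	return 0;
}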
@@ -3728,6 +3767,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  * true if they don't match).
  */
 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
 					 unsigned int *pipe_bpp,
 					 struct drm_display_mode *mode)
 {
@@ -3797,7 +3837,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 	 * also stays within the max display bpc discovered above.
 	 */
 
-	switch (crtc->fb->depth) {
+	switch (fb->depth) {
 	case 8:
 		bpc = 8; /* since we go through a colormap */
 		break;
@@ -4216,7 +4256,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 			      struct drm_display_mode *mode,
 			      struct drm_display_mode *adjusted_mode,
 			      int x, int y,
-			      struct drm_framebuffer *old_fb)
+			      struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4406,7 +4446,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	I915_WRITE(DSPCNTR(plane), dspcntr);
 	POSTING_READ(DSPCNTR(plane));
 
-	ret = intel_pipe_set_base(crtc, x, y, old_fb);
+	ret = intel_pipe_set_base(crtc, x, y, fb);
 
 	intel_update_watermarks(dev);
 
@@ -4560,24 +4600,130 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
 	return 120000;
 }
 
+static void ironlake_set_pipeconf(struct drm_crtc *crtc,
+				  struct drm_display_mode *adjusted_mode,
+				  bool dither)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	uint32_t val;
+
+	val = I915_READ(PIPECONF(pipe));
+
+	val &= ~PIPE_BPC_MASK;
+	switch (intel_crtc->bpp) {
+	case 18:
+		val |= PIPE_6BPC;
+		break;
+	case 24:
+		val |= PIPE_8BPC;
+		break;
+	case 30:
+		val |= PIPE_10BPC;
+		break;
+	case 36:
+		val |= PIPE_12BPC;
+		break;
+	default:
+		val |= PIPE_8BPC;
+		break;
+	}
+
+	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+	if (dither)
+		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+	val &= ~PIPECONF_INTERLACE_MASK;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		val |= PIPECONF_INTERLACED_ILK;
+	else
+		val |= PIPECONF_PROGRESSIVE;
+
+	I915_WRITE(PIPECONF(pipe), val);
+	POSTING_READ(PIPECONF(pipe));
+}
+
+static bool ironlake_compute_clocks(struct drm_crtc *crtc,
+				    struct drm_display_mode *adjusted_mode,
+				    intel_clock_t *clock,
+				    bool *has_reduced_clock,
+				    intel_clock_t *reduced_clock)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	int refclk;
+	const intel_limit_t *limit;
+	bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		switch (intel_encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_SDVO:
+		case INTEL_OUTPUT_HDMI:
+			is_sdvo = true;
+			if (intel_encoder->needs_tv_clock)
+				is_tv = true;
+			break;
+		case INTEL_OUTPUT_TVOUT:
+			is_tv = true;
+			break;
+		}
+	}
+
+	refclk = ironlake_get_refclk(crtc);
+
+	/*
+	 * Returns a set of divisors for the desired target clock with the given
+	 * refclk, or FALSE.  The returned values represent the clock equation:
+	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+	 */
+	limit = intel_limit(crtc, refclk);
+	ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
+			      clock);
+	if (!ret)
+		return false;
+
+	if (is_lvds && dev_priv->lvds_downclock_avail) {
+		/*
+		 * Ensure we match the reduced clock's P to the target clock.
+		 * If the clocks don't match, we can't switch the display clock
+		 * by using the FP0/FP1. In such case we will disable the LVDS
+		 * downclock feature.
+		 */
+		*has_reduced_clock = limit->find_pll(limit, crtc,
+						     dev_priv->lvds_downclock,
+						     refclk,
+						     clock,
+						     reduced_clock);
+	}
+
+	if (is_sdvo && is_tv)
+		i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
+
+	return true;
+}
+
 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 				  struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode,
 				  int x, int y,
-				  struct drm_framebuffer *old_fb)
+				  struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
-	int refclk, num_connectors = 0;
+	int num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
-	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
+	u32 dpll, fp = 0, fp2 = 0;
 	bool ok, has_reduced_clock = false, is_sdvo = false;
 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
 	struct intel_encoder *encoder, *edp_encoder = NULL;
-	const intel_limit_t *limit;
 	int ret;
 	struct fdi_m_n m_n = {0};
 	u32 temp;
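The divisor comment carried into ironlake_compute_clocks() above encodes the PLL equation: dot clock = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. A compilable sketch of that arithmetic; only the formula comes from the code, the divisor values in main() are made up for illustration:

#include <stdio.h>

static long dot_clock_khz(long refclk_khz, int m1, int m2, int n,
			  int p1, int p2)
{
	long m = 5 * (m1 + 2) + (m2 + 2);	/* combined multiplier */

	return refclk_khz * m / (n + 2) / p1 / p2;
}

int main(void)
{
	/* 120 MHz reference (the ironlake_get_refclk() default shown
	 * above), with arbitrary example divisors */
	printf("%ld kHz\n", dot_clock_khz(120000, 12, 9, 1, 2, 10));
	return 0;
}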
@@ -4619,16 +4765,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		num_connectors++;
 	}
 
-	refclk = ironlake_get_refclk(crtc);
-
-	/*
-	 * Returns a set of divisors for the desired target clock with the given
-	 * refclk, or FALSE.  The returned values represent the clock equation:
-	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
-	 */
-	limit = intel_limit(crtc, refclk);
-	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
-			     &clock);
+	ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+				     &has_reduced_clock, &reduced_clock);
 	if (!ok) {
 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
@@ -4637,24 +4775,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	/* Ensure that the cursor is valid for the new mode before changing... */
 	intel_crtc_update_cursor(crtc, true);
 
-	if (is_lvds && dev_priv->lvds_downclock_avail) {
-		/*
-		 * Ensure we match the reduced clock's P to the target clock.
-		 * If the clocks don't match, we can't switch the display clock
-		 * by using the FP0/FP1. In such case we will disable the LVDS
-		 * downclock feature.
-		 */
-		has_reduced_clock = limit->find_pll(limit, crtc,
-						    dev_priv->lvds_downclock,
-						    refclk,
-						    &clock,
-						    &reduced_clock);
-	}
-
-	if (is_sdvo && is_tv)
-		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
-
-
 	/* FDI link */
 	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 	lane = 0;
@@ -4682,32 +4802,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		target_clock = adjusted_mode->clock;
 
 	/* determine panel color depth */
-	temp = I915_READ(PIPECONF(pipe));
-	temp &= ~PIPE_BPC_MASK;
-	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
-	switch (pipe_bpp) {
-	case 18:
-		temp |= PIPE_6BPC;
-		break;
-	case 24:
-		temp |= PIPE_8BPC;
-		break;
-	case 30:
-		temp |= PIPE_10BPC;
-		break;
-	case 36:
-		temp |= PIPE_12BPC;
-		break;
-	default:
+	dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, mode);
+	if (is_lvds && dev_priv->lvds_dither)
+		dither = true;
+
+	if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
+	    pipe_bpp != 36) {
 		WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
 		     pipe_bpp);
-		temp |= PIPE_8BPC;
 		pipe_bpp = 24;
-		break;
 	}
-
 	intel_crtc->bpp = pipe_bpp;
-	I915_WRITE(PIPECONF(pipe), temp);
 
 	if (!lane) {
 		/*
@@ -4791,12 +4896,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
 
-	/* setup pipeconf */
-	pipeconf = I915_READ(PIPECONF(pipe));
-
-	/* Set up the display plane register */
-	dspcntr = DISPPLANE_GAMMA_ENABLE;
-
 	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
 	drm_mode_debug_printmodeline(mode);
 
@@ -4856,12 +4955,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		I915_WRITE(PCH_LVDS, temp);
 	}
 
-	pipeconf &= ~PIPECONF_DITHER_EN;
-	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
-	if ((is_lvds && dev_priv->lvds_dither) || dither) {
-		pipeconf |= PIPECONF_DITHER_EN;
-		pipeconf |= PIPECONF_DITHER_TYPE_SP;
-	}
 	if (is_dp && !is_cpu_edp) {
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
 	} else {
@@ -4897,9 +4990,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}
 
-	pipeconf &= ~PIPECONF_INTERLACE_MASK;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-		pipeconf |= PIPECONF_INTERLACED_ILK;
 		/* the chip adds 2 halflines automatically */
 		adjusted_mode->crtc_vtotal -= 1;
 		adjusted_mode->crtc_vblank_end -= 1;
@@ -4907,7 +4998,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 			   adjusted_mode->crtc_hsync_start
 			   - adjusted_mode->crtc_htotal/2);
 	} else {
-		pipeconf |= PIPECONF_PROGRESSIVE;
 		I915_WRITE(VSYNCSHIFT(pipe), 0);
 	}
 
@@ -4945,15 +5035,15 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	if (is_cpu_edp)
 		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 
-	I915_WRITE(PIPECONF(pipe), pipeconf);
-	POSTING_READ(PIPECONF(pipe));
+	ironlake_set_pipeconf(crtc, adjusted_mode, dither);
 
 	intel_wait_for_vblank(dev, pipe);
 
-	I915_WRITE(DSPCNTR(plane), dspcntr);
+	/* Set up the display plane register */
+	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
 	POSTING_READ(DSPCNTR(plane));
 
-	ret = intel_pipe_set_base(crtc, x, y, old_fb);
+	ret = intel_pipe_set_base(crtc, x, y, fb);
 
 	intel_update_watermarks(dev);
 
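With this hunk the scattered PIPECONF bit twiddling in ironlake_crtc_mode_set() collapses into the ironlake_set_pipeconf() helper introduced earlier: one read-modify-write that folds in bpc, dithering and interlace. The sketch below mirrors that shape in stand-alone C; the bit masks are placeholders for illustration, not the real PIPECONF register layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_BPC_MASK      (0x7u << 0)	/* placeholder bits, not hw */
#define FAKE_6BPC          (0x1u << 0)
#define FAKE_8BPC          (0x2u << 0)
#define FAKE_10BPC         (0x3u << 0)
#define FAKE_12BPC         (0x4u << 0)
#define FAKE_DITHER_EN     (0x1u << 3)
#define FAKE_INTERLACE_BIT (0x1u << 4)

static uint32_t build_pipeconf(uint32_t val, int bpp, bool dither,
			       bool interlaced)
{
	val &= ~FAKE_BPC_MASK;
	switch (bpp) {
	case 18: val |= FAKE_6BPC; break;
	case 30: val |= FAKE_10BPC; break;
	case 36: val |= FAKE_12BPC; break;
	case 24:
	default: val |= FAKE_8BPC; break;	/* 8 bpc fallback, as above */
	}

	if (dither)
		val |= FAKE_DITHER_EN;
	else
		val &= ~FAKE_DITHER_EN;

	if (interlaced)
		val |= FAKE_INTERLACE_BIT;
	else
		val &= ~FAKE_INTERLACE_BIT;

	return val;
}

int main(void)
{
	printf("0x%08x\n", (unsigned)build_pipeconf(0, 24, true, false));
	return 0;
}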
@@ -4966,7 +5056,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 			      struct drm_display_mode *mode,
 			      struct drm_display_mode *adjusted_mode,
 			      int x, int y,
-			      struct drm_framebuffer *old_fb)
+			      struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4977,14 +5067,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	drm_vblank_pre_modeset(dev, pipe);
 
 	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
-					      x, y, old_fb);
+					      x, y, fb);
 	drm_vblank_post_modeset(dev, pipe);
 
-	if (ret)
-		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
-	else
-		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
-
 	return ret;
 }
 
@@ -5057,6 +5142,91 @@ static void g4x_write_eld(struct drm_connector *connector,
 	I915_WRITE(G4X_AUD_CNTL_ST, i);
 }
 
+static void haswell_write_eld(struct drm_connector *connector,
+			      struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	uint8_t *eld = connector->eld;
+	struct drm_device *dev = crtc->dev;
+	uint32_t eldv;
+	uint32_t i;
+	int len;
+	int pipe = to_intel_crtc(crtc)->pipe;
+	int tmp;
+
+	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
+	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
+	int aud_config = HSW_AUD_CFG(pipe);
+	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
+
+
+	DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
+
+	/* Audio output enable */
+	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
+	tmp = I915_READ(aud_cntrl_st2);
+	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
+	I915_WRITE(aud_cntrl_st2, tmp);
+
+	/* Wait for 1 vertical blank */
+	intel_wait_for_vblank(dev, pipe);
+
+	/* Set ELD valid state */
+	tmp = I915_READ(aud_cntrl_st2);
+	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
+	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
+	I915_WRITE(aud_cntrl_st2, tmp);
+	tmp = I915_READ(aud_cntrl_st2);
+	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
+
+	/* Enable HDMI mode */
+	tmp = I915_READ(aud_config);
+	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
+	/* clear N_programing_enable and N_value_index */
+	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
+	I915_WRITE(aud_config, tmp);
+
+	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
+
+	eldv = AUDIO_ELD_VALID_A << (pipe * 4);
+
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
+		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
+	} else
+		I915_WRITE(aud_config, 0);
+
+	if (intel_eld_uptodate(connector,
+			       aud_cntrl_st2, eldv,
+			       aud_cntl_st, IBX_ELD_ADDRESS,
+			       hdmiw_hdmiedid))
+		return;
+
+	i = I915_READ(aud_cntrl_st2);
+	i &= ~eldv;
+	I915_WRITE(aud_cntrl_st2, i);
+
+	if (!eld[0])
+		return;
+
+	i = I915_READ(aud_cntl_st);
+	i &= ~IBX_ELD_ADDRESS;
+	I915_WRITE(aud_cntl_st, i);
+	i = (i >> 29) & DIP_PORT_SEL_MASK;	/* DIP_Port_Select, 0x1 = PortB */
+	DRM_DEBUG_DRIVER("port num:%d\n", i);
+
+	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
+	DRM_DEBUG_DRIVER("ELD size %d\n", len);
+	for (i = 0; i < len; i++)
+		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+	i = I915_READ(aud_cntrl_st2);
+	i |= eldv;
+	I915_WRITE(aud_cntrl_st2, i);
+
+}
+
 static void ironlake_write_eld(struct drm_connector *connector,
 			       struct drm_crtc *crtc)
 {
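haswell_write_eld() above streams the ELD to hardware one dword at a time, taking the length in dwords from eld[2] and capping it at 21 (an 84-byte hardware buffer). A user-space model of just that packing loop, with printf standing in for the register write; memcpy replaces the driver's pointer cast to stay alignment-safe in portable C:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ELD_HW_DWORDS 21	/* 84 bytes of hw ELD buffer */

static void write_eld(const uint8_t *eld)
{
	uint32_t dw;
	int len = eld[2] < ELD_HW_DWORDS ? eld[2] : ELD_HW_DWORDS;
	int i;

	for (i = 0; i < len; i++) {
		memcpy(&dw, eld + 4 * i, sizeof(dw));
		printf("dword %2d: 0x%08x\n", i, (unsigned)dw);
	}
}

int main(void)
{
	uint8_t eld[84] = { 0 };

	eld[2] = 2;	/* ELD length in dwords */
	eld[4] = 'E';	/* arbitrary payload byte */
	write_eld(eld);
	return 0;
}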
@@ -5069,28 +5239,24 @@ static void ironlake_write_eld(struct drm_connector *connector,
 	int aud_config;
 	int aud_cntl_st;
 	int aud_cntrl_st2;
+	int pipe = to_intel_crtc(crtc)->pipe;
 
 	if (HAS_PCH_IBX(connector->dev)) {
-		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
-		aud_config = IBX_AUD_CONFIG_A;
-		aud_cntl_st = IBX_AUD_CNTL_ST_A;
+		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+		aud_config = IBX_AUD_CFG(pipe);
+		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
 		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
 	} else {
-		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
-		aud_config = CPT_AUD_CONFIG_A;
-		aud_cntl_st = CPT_AUD_CNTL_ST_A;
+		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+		aud_config = CPT_AUD_CFG(pipe);
+		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
 		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
 	}
 
-	i = to_intel_crtc(crtc)->pipe;
-	hdmiw_hdmiedid += i * 0x100;
-	aud_cntl_st += i * 0x100;
-	aud_config += i * 0x100;
-
-	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
+	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
 
 	i = I915_READ(aud_cntl_st);
-	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
+	i = (i >> 29) & DIP_PORT_SEL_MASK;	/* DIP_Port_Select, 0x1 = PortB */
 	if (!i) {
 		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
 		/* operate blindly on all ports */
@@ -5337,8 +5503,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	uint32_t addr;
 	int ret;
 
-	DRM_DEBUG_KMS("\n");
-
 	/* if we want to turn off the cursor ignore width and height */
 	if (!handle) {
 		DRM_DEBUG_KMS("cursor off\n");
@@ -5584,17 +5748,18 @@ mode_fits_in_fbdev(struct drm_device *dev,
 	return fb;
 }
 
-bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
-				struct drm_connector *connector,
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
 				struct drm_display_mode *mode,
 				struct intel_load_detect_pipe *old)
 {
 	struct intel_crtc *intel_crtc;
+	struct intel_encoder *intel_encoder =
+		intel_attached_encoder(connector);
 	struct drm_crtc *possible_crtc;
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_crtc *crtc = NULL;
 	struct drm_device *dev = encoder->dev;
-	struct drm_framebuffer *old_fb;
+	struct drm_framebuffer *fb;
 	int i = -1;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -5615,21 +5780,12 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 	if (encoder->crtc) {
 		crtc = encoder->crtc;
 
-		intel_crtc = to_intel_crtc(crtc);
-		old->dpms_mode = intel_crtc->dpms_mode;
+		old->dpms_mode = connector->dpms;
 		old->load_detect_temp = false;
 
 		/* Make sure the crtc and connector are running */
-		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
-			struct drm_encoder_helper_funcs *encoder_funcs;
-			struct drm_crtc_helper_funcs *crtc_funcs;
-
-			crtc_funcs = crtc->helper_private;
-			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-
-			encoder_funcs = encoder->helper_private;
-			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-		}
+		if (connector->dpms != DRM_MODE_DPMS_ON)
+			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
 
 		return true;
 	}
@@ -5653,19 +5809,17 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 		return false;
 	}
 
-	encoder->crtc = crtc;
-	connector->encoder = encoder;
+	intel_encoder->new_crtc = to_intel_crtc(crtc);
+	to_intel_connector(connector)->new_encoder = intel_encoder;
 
 	intel_crtc = to_intel_crtc(crtc);
-	old->dpms_mode = intel_crtc->dpms_mode;
+	old->dpms_mode = connector->dpms;
 	old->load_detect_temp = true;
 	old->release_fb = NULL;
 
 	if (!mode)
 		mode = &load_detect_mode;
 
-	old_fb = crtc->fb;
-
 	/* We need a framebuffer large enough to accommodate all accesses
 	 * that the plane may generate whilst we perform load detection.
 	 * We can not rely on the fbcon either being present (we get called
@@ -5673,50 +5827,52 @@ bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 	 * not even exist) or that it is large enough to satisfy the
 	 * requested mode.
 	 */
-	crtc->fb = mode_fits_in_fbdev(dev, mode);
-	if (crtc->fb == NULL) {
+	fb = mode_fits_in_fbdev(dev, mode);
+	if (fb == NULL) {
 		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
-		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
-		old->release_fb = crtc->fb;
+		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
+		old->release_fb = fb;
 	} else
 		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
-	if (IS_ERR(crtc->fb)) {
+	if (IS_ERR(fb)) {
 		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
-		crtc->fb = old_fb;
-		return false;
+		goto fail;
 	}
 
-	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
+	if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
 		if (old->release_fb)
 			old->release_fb->funcs->destroy(old->release_fb);
-		crtc->fb = old_fb;
-		return false;
+		goto fail;
 	}
 
 	/* let the connector get through one full cycle before testing */
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
 	return true;
+fail:
+	connector->encoder = NULL;
+	encoder->crtc = NULL;
+	return false;
 }
 
-void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
-				    struct drm_connector *connector,
+void intel_release_load_detect_pipe(struct drm_connector *connector,
 				    struct intel_load_detect_pipe *old)
 {
+	struct intel_encoder *intel_encoder =
+		intel_attached_encoder(connector);
 	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_device *dev = encoder->dev;
-	struct drm_crtc *crtc = encoder->crtc;
-	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
 		      connector->base.id, drm_get_connector_name(connector),
 		      encoder->base.id, drm_get_encoder_name(encoder));
 
 	if (old->load_detect_temp) {
-		connector->encoder = NULL;
-		drm_helper_disable_unused_functions(dev);
+		struct drm_crtc *crtc = encoder->crtc;
+
+		to_intel_connector(connector)->new_encoder = NULL;
+		intel_encoder->new_crtc = NULL;
+		intel_set_mode(crtc, NULL, 0, 0, NULL);
 
 		if (old->release_fb)
 			old->release_fb->funcs->destroy(old->release_fb);
@@ -5725,10 +5881,8 @@ void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
 	}
 
 	/* Switch crtc and encoder back off if necessary */
-	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
-		encoder_funcs->dpms(encoder, old->dpms_mode);
-		crtc_funcs->dpms(crtc, old->dpms_mode);
-	}
+	if (old->dpms_mode != DRM_MODE_DPMS_ON)
+		connector->funcs->dpms(connector, old->dpms_mode);
 }
 
 /* Returns the clock of the currently programmed mode of the given pipe. */
@@ -5850,46 +6004,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 	return mode;
 }
 
-#define GPU_IDLE_TIMEOUT 500 /* ms */
-
-/* When this timer fires, we've been idle for awhile */
-static void intel_gpu_idle_timer(unsigned long arg)
-{
-	struct drm_device *dev = (struct drm_device *)arg;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (!list_empty(&dev_priv->mm.active_list)) {
-		/* Still processing requests, so just re-arm the timer. */
-		mod_timer(&dev_priv->idle_timer, jiffies +
-			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-		return;
-	}
-
-	dev_priv->busy = false;
-	queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
-#define CRTC_IDLE_TIMEOUT 1000 /* ms */
-
-static void intel_crtc_idle_timer(unsigned long arg)
-{
-	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
-	struct drm_crtc *crtc = &intel_crtc->base;
-	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
-	struct intel_framebuffer *intel_fb;
-
-	intel_fb = to_intel_framebuffer(crtc->fb);
-	if (intel_fb && intel_fb->obj->active) {
-		/* The framebuffer is still being accessed by the GPU. */
-		mod_timer(&intel_crtc->idle_timer, jiffies +
-			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
-		return;
-	}
-
-	intel_crtc->busy = false;
-	queue_work(dev_priv->wq, &dev_priv->idle_work);
-}
-
 static void intel_increase_pllclock(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -5919,10 +6033,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
 		if (dpll & DISPLAY_RATE_SELECT_FPA1)
 			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
 	}
-
-	/* Schedule downclock */
-	mod_timer(&intel_crtc->idle_timer, jiffies +
-		  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
 }
 
 static void intel_decrease_pllclock(struct drm_crtc *crtc)
@@ -5961,89 +6071,46 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
 
 }
 
-/**
- * intel_idle_update - adjust clocks for idleness
- * @work: work struct
- *
- * Either the GPU or display (or both) went idle.  Check the busy status
- * here and adjust the CRTC and GPU clocks as necessary.
- */
-static void intel_idle_update(struct work_struct *work)
+void intel_mark_busy(struct drm_device *dev)
+{
+	i915_update_gfx_val(dev->dev_private);
+}
+
+void intel_mark_idle(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    idle_work);
-	struct drm_device *dev = dev_priv->dev;
+}
+
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
 	struct drm_crtc *crtc;
-	struct intel_crtc *intel_crtc;
 
 	if (!i915_powersave)
 		return;
 
-	mutex_lock(&dev->struct_mutex);
-
-	i915_update_gfx_val(dev_priv);
-
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		/* Skip inactive CRTCs */
 		if (!crtc->fb)
 			continue;
 
-		intel_crtc = to_intel_crtc(crtc);
-		if (!intel_crtc->busy)
-			intel_decrease_pllclock(crtc);
+		if (to_intel_framebuffer(crtc->fb)->obj == obj)
+			intel_increase_pllclock(crtc);
 	}
-
-
-	mutex_unlock(&dev->struct_mutex);
 }
 
-/**
- * intel_mark_busy - mark the GPU and possibly the display busy
- * @dev: drm device
- * @obj: object we're operating on
- *
- * Callers can use this function to indicate that the GPU is busy processing
- * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
- * buffer), we'll also mark the display as busy, so we know to increase its
- * clock frequency.
- */
-void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
+void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_crtc *crtc = NULL;
-	struct intel_framebuffer *intel_fb;
-	struct intel_crtc *intel_crtc;
-
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	if (!dev_priv->busy) {
-		intel_sanitize_pm(dev);
-		dev_priv->busy = true;
-	} else
-		mod_timer(&dev_priv->idle_timer, jiffies +
-			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+	struct drm_device *dev = obj->base.dev;
+	struct drm_crtc *crtc;
 
-	if (obj == NULL)
+	if (!i915_powersave)
 		return;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		if (!crtc->fb)
 			continue;
 
-		intel_crtc = to_intel_crtc(crtc);
-		intel_fb = to_intel_framebuffer(crtc->fb);
-		if (intel_fb->obj == obj) {
-			if (!intel_crtc->busy) {
-				/* Non-busy -> busy, upclock */
-				intel_increase_pllclock(crtc);
-				intel_crtc->busy = true;
-			} else {
-				/* Busy -> busy, put off timer */
-				mod_timer(&intel_crtc->idle_timer, jiffies +
-					  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
-			}
-		}
+		if (to_intel_framebuffer(crtc->fb)->obj == obj)
+			intel_decrease_pllclock(crtc);
 	}
 }
 
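The new intel_mark_fb_busy()/intel_mark_fb_idle() pair above drops the old timer machinery in favour of a direct scan: find every crtc scanning out of the given object and re-clock its PLL. A minimal stand-alone model of that scan (types and names are illustrative, not the driver's):

#include <stdio.h>

struct fb_obj { int id; };
struct model_crtc { struct fb_obj *fb; };

static void mark_fb(struct model_crtc *crtcs, int n,
		    const struct fb_obj *obj, int busy)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!crtcs[i].fb)	/* crtc not scanning anything out */
			continue;
		if (crtcs[i].fb == obj)
			printf("crtc %d: %sclock PLL\n", i, busy ? "up" : "down");
	}
}

int main(void)
{
	struct fb_obj a = { 1 }, b = { 2 };
	struct model_crtc crtcs[2] = { { &a }, { &b } };

	mark_fb(crtcs, 2, &a, 1);	/* page flip: upclock crtc 0 only */
	mark_fb(crtcs, 2, &a, 0);	/* idle again: downclock crtc 0 */
	return 0;
}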
@@ -6394,7 +6461,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	default:
 		WARN_ONCE(1, "unknown plane in flip command\n");
 		ret = -ENODEV;
-		goto err;
+		goto err_unpin;
 	}
 
 	ret = intel_ring_begin(ring, 4);
@@ -6502,7 +6569,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		goto cleanup_pending;
 
 	intel_disable_fbc(dev);
-	intel_mark_busy(dev, obj);
+	intel_mark_fb_busy(obj);
 	mutex_unlock(&dev->struct_mutex);
 
 	trace_i915_flip_request(intel_crtc->plane, obj);
@@ -6527,81 +6594,807 @@ free_work:
 	return ret;
 }
 
-static void intel_sanitize_modesetting(struct drm_device *dev,
-				       int pipe, int plane)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg, val;
-	int i;
-
-	/* Clear any frame start delays used for debugging left by the BIOS */
-	for_each_pipe(i) {
-		reg = PIPECONF(i);
-		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
-	}
-
-	if (HAS_PCH_SPLIT(dev))
-		return;
-
-	/* Who knows what state these registers were left in by the BIOS or
-	 * grub?
-	 *
-	 * If we leave the registers in a conflicting state (e.g. with the
-	 * display plane reading from the other pipe than the one we intend
-	 * to use) then when we attempt to teardown the active mode, we will
-	 * not disable the pipes and planes in the correct order -- leaving
-	 * a plane reading from a disabled pipe and possibly leading to
-	 * undefined behaviour.
-	 */
-
-	reg = DSPCNTR(plane);
-	val = I915_READ(reg);
-
-	if ((val & DISPLAY_PLANE_ENABLE) == 0)
-		return;
-	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
-		return;
-
-	/* This display plane is active and attached to the other CPU pipe. */
-	pipe = !pipe;
-
-	/* Disable the plane and wait for it to stop reading from the pipe. */
-	intel_disable_plane(dev_priv, plane, pipe);
-	intel_disable_pipe(dev_priv, pipe);
-}
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+	.mode_set_base_atomic = intel_pipe_set_base_atomic,
+	.load_lut = intel_crtc_load_lut,
+	.disable = intel_crtc_noop,
+};
+
+bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
+{
+	struct intel_encoder *other_encoder;
+	struct drm_crtc *crtc = &encoder->new_crtc->base;
+
+	if (WARN_ON(!crtc))
+		return false;
+
+	list_for_each_entry(other_encoder,
+			    &crtc->dev->mode_config.encoder_list,
+			    base.head) {
+
+		if (&other_encoder->new_crtc->base != crtc ||
+		    encoder == other_encoder)
+			continue;
+		else
+			return true;
+	}
+
+	return false;
+}
+
+static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
+				  struct drm_crtc *crtc)
+{
+	struct drm_device *dev;
+	struct drm_crtc *tmp;
+	int crtc_mask = 1;
+
+	WARN(!crtc, "checking null crtc?\n");
+
+	dev = crtc->dev;
+
+	list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+		if (tmp == crtc)
+			break;
+		crtc_mask <<= 1;
+	}
+
+	if (encoder->possible_crtcs & crtc_mask)
+		return true;
+	return false;
+}
+
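intel_encoder_crtc_ok() above re-derives drm's crtc bitmask convention: the Nth crtc on the mode_config list corresponds to bit N of encoder->possible_crtcs. The same bit test in stand-alone form, with names that are illustrative only:

#include <stdbool.h>
#include <stdio.h>

static bool encoder_can_use_crtc(unsigned possible_crtcs, int crtc_index)
{
	return possible_crtcs & (1u << crtc_index);
}

int main(void)
{
	unsigned possible = 0x1;	/* encoder wired to the first crtc only */

	printf("crtc 0: %d\n", encoder_can_use_crtc(possible, 0));	/* 1 */
	printf("crtc 1: %d\n", encoder_can_use_crtc(possible, 1));	/* 0 */
	return 0;
}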
+/**
+ * intel_modeset_update_staged_output_state
+ *
+ * Updates the staged output configuration state, e.g. after we've read out the
+ * current hw state.
+ */
+static void intel_modeset_update_staged_output_state(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		connector->new_encoder =
+			to_intel_encoder(connector->base.encoder);
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		encoder->new_crtc =
+			to_intel_crtc(encoder->base.crtc);
+	}
+}
+
+/**
+ * intel_modeset_commit_output_state
+ *
+ * This function copies the staged display pipe configuration to the real one.
+ */
+static void intel_modeset_commit_output_state(struct drm_device *dev)
+{
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+		connector->base.encoder = &connector->new_encoder->base;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+		encoder->base.crtc = &encoder->new_crtc->base;
+	}
+}
+
+static struct drm_display_mode *
+intel_modeset_adjusted_mode(struct drm_crtc *crtc,
+			    struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_display_mode *adjusted_mode;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct intel_encoder *encoder;
+
+	adjusted_mode = drm_mode_duplicate(dev, mode);
+	if (!adjusted_mode)
+		return ERR_PTR(-ENOMEM);
+
+	/* Pass our mode to the connectors and the CRTC to give them a chance to
+	 * adjust it according to limitations or connector properties, and also
+	 * a chance to reject the mode entirely.
+	 */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
+			    base.head) {
+
+		if (&encoder->new_crtc->base != crtc)
+			continue;
+		encoder_funcs = encoder->base.helper_private;
+		if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
+						adjusted_mode))) {
+			DRM_DEBUG_KMS("Encoder fixup failed\n");
+			goto fail;
+		}
+	}
+
+	if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
+		DRM_DEBUG_KMS("CRTC fixup failed\n");
+		goto fail;
+	}
+	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+
+	return adjusted_mode;
+fail:
+	drm_mode_destroy(dev, adjusted_mode);
+	return ERR_PTR(-EINVAL);
+}
+
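The staged-state helpers above split every output change into two phases: requests land in the new_encoder/new_crtc pointers, and only intel_modeset_commit_output_state() copies them into the pointers the rest of drm reads. A toy model of that two-phase commit, with illustrative types only:

#include <stdio.h>

struct crtc { int id; };

struct encoder {
	struct crtc *crtc;	/* committed, visible state */
	struct crtc *new_crtc;	/* staged state */
};

static void stage(struct encoder *enc, struct crtc *crtc)
{
	enc->new_crtc = crtc;	/* nothing visible changes yet */
}

static void commit(struct encoder *enc)
{
	enc->crtc = enc->new_crtc;	/* the single point where state flips */
}

int main(void)
{
	struct crtc pipe_a = { 0 };
	struct encoder enc = { 0 };

	stage(&enc, &pipe_a);
	printf("before commit: %p\n", (void *)enc.crtc);	/* still NULL */
	commit(&enc);
	printf("after commit:  %p\n", (void *)enc.crtc);
	return 0;
}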
6568 /* Disable the plane and wait for it to stop reading from the pipe. */ 6734/* Computes which crtcs are affected and sets the relevant bits in the mask. For
6569 intel_disable_plane(dev_priv, plane, pipe); 6735 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
6570 intel_disable_pipe(dev_priv, pipe); 6736static void
6737intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
6738 unsigned *prepare_pipes, unsigned *disable_pipes)
6739{
6740 struct intel_crtc *intel_crtc;
6741 struct drm_device *dev = crtc->dev;
6742 struct intel_encoder *encoder;
6743 struct intel_connector *connector;
6744 struct drm_crtc *tmp_crtc;
6745
6746 *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
6747
6748 /* Check which crtcs have changed outputs connected to them, these need
6749 * to be part of the prepare_pipes mask. We don't (yet) support global
6750 * modeset across multiple crtcs, so modeset_pipes will only have one
6751 * bit set at most. */
6752 list_for_each_entry(connector, &dev->mode_config.connector_list,
6753 base.head) {
6754 if (connector->base.encoder == &connector->new_encoder->base)
6755 continue;
6756
6757 if (connector->base.encoder) {
6758 tmp_crtc = connector->base.encoder->crtc;
6759
6760 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
6761 }
6762
6763 if (connector->new_encoder)
6764 *prepare_pipes |=
6765 1 << connector->new_encoder->new_crtc->pipe;
6766 }
6767
6768 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
6769 base.head) {
6770 if (encoder->base.crtc == &encoder->new_crtc->base)
6771 continue;
6772
6773 if (encoder->base.crtc) {
6774 tmp_crtc = encoder->base.crtc;
6775
6776 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
6777 }
6778
6779 if (encoder->new_crtc)
6780 *prepare_pipes |= 1 << encoder->new_crtc->pipe;
6781 }
6782
6783 /* Check for any pipes that will be fully disabled ... */
6784 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
6785 base.head) {
6786 bool used = false;
6787
6788 /* Don't try to disable disabled crtcs. */
6789 if (!intel_crtc->base.enabled)
6790 continue;
6791
6792 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
6793 base.head) {
6794 if (encoder->new_crtc == intel_crtc)
6795 used = true;
6796 }
6797
6798 if (!used)
6799 *disable_pipes |= 1 << intel_crtc->pipe;
6800 }
6801
6802
6803 /* set_mode is also used to update properties on life display pipes. */
6804 intel_crtc = to_intel_crtc(crtc);
6805 if (crtc->enabled)
6806 *prepare_pipes |= 1 << intel_crtc->pipe;
6807
6808 /* We only support modeset on one single crtc, hence we need to do that
6809	 * only for the passed-in crtc iff we change anything other than just
6810 * disable crtcs.
6811 *
6812 * This is actually not true, to be fully compatible with the old crtc
6813 * helper we automatically disable _any_ output (i.e. doesn't need to be
6814 * connected to the crtc we're modesetting on) if it's disconnected.
6815	 * Which is a rather nutty api (since changing the output configuration
6816 * without userspace's explicit request can lead to confusion), but
6817 * alas. Hence we currently need to modeset on all pipes we prepare. */
6818 if (*prepare_pipes)
6819 *modeset_pipes = *prepare_pipes;
6820
6821 /* ... and mask these out. */
6822 *modeset_pipes &= ~(*disable_pipes);
6823 *prepare_pipes &= ~(*disable_pipes);
}
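intel_modeset_affected_pipes communicates its result as three pipe bitmasks. A toy standalone model of that mask bookkeeping, with the pipe assignments invented purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned modeset_pipes, prepare_pipes = 0, disable_pipes = 0;

	prepare_pipes |= 1 << 0;	/* pipe A gains a changed output */
	disable_pipes |= 1 << 1;	/* pipe B lost all its encoders */

	/* everything prepared must also be modeset (single-crtc limit)... */
	modeset_pipes = prepare_pipes;

	/* ... and fully disabled pipes are masked back out of both */
	modeset_pipes &= ~disable_pipes;
	prepare_pipes &= ~disable_pipes;

	printf("modeset: %x, prepare: %x, disable: %x\n",
	       modeset_pipes, prepare_pipes, disable_pipes);
	return 0;
}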

-static void intel_crtc_reset(struct drm_crtc *crtc)
+static bool intel_crtc_in_use(struct drm_crtc *crtc)
 {
+	struct drm_encoder *encoder;
 	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

-	/* Reset flags back to the 'unknown' status so that they
-	 * will be correctly set on the initial modeset.
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->crtc == crtc)
+			return true;
+
+	return false;
+}
6837
6838static void
6839intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
6840{
6841 struct intel_encoder *intel_encoder;
6842 struct intel_crtc *intel_crtc;
6843 struct drm_connector *connector;
6844
6845 list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
6846 base.head) {
6847 if (!intel_encoder->base.crtc)
6848 continue;
6849
6850 intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
6851
6852 if (prepare_pipes & (1 << intel_crtc->pipe))
6853 intel_encoder->connectors_active = false;
6854 }
6855
6856 intel_modeset_commit_output_state(dev);
6857
6858 /* Update computed state. */
6859 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
6860 base.head) {
6861 intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
6862 }
6863
6864 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
6865 if (!connector->encoder || !connector->encoder->crtc)
6866 continue;
6867
6868 intel_crtc = to_intel_crtc(connector->encoder->crtc);
6869
6870 if (prepare_pipes & (1 << intel_crtc->pipe)) {
6871 struct drm_property *dpms_property =
6872 dev->mode_config.dpms_property;
6873
6874 connector->dpms = DRM_MODE_DPMS_ON;
6875 drm_connector_property_set_value(connector,
6876 dpms_property,
6877 DRM_MODE_DPMS_ON);
6878
6879 intel_encoder = to_intel_encoder(connector->encoder);
6880 intel_encoder->connectors_active = true;
6881 }
6882 }
6883
6884}
6885
6886#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
6887 list_for_each_entry((intel_crtc), \
6888 &(dev)->mode_config.crtc_list, \
6889 base.head) \
6890 if (mask & (1 <<(intel_crtc)->pipe)) \
6891
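for_each_intel_crtc_masked expands to a for loop immediately followed by an if, so the caller's statement body attaches to the filter. A standalone sketch of the same iterate-then-filter macro shape over a plain array (names and data are invented; the trailing if also means a dangling-else hazard for callers):

#include <stdio.h>

struct crtc { int pipe; };

/* same shape as the kernel macro: for followed by a filtering if */
#define for_each_crtc_masked(crtcs, n, mask, c) \
	for ((c) = (crtcs); (c) < (crtcs) + (n); (c)++) \
		if ((mask) & (1 << (c)->pipe))

int main(void)
{
	struct crtc crtcs[] = { { 0 }, { 1 }, { 2 } };
	unsigned mask = (1 << 0) | (1 << 2);
	struct crtc *c;

	for_each_crtc_masked(crtcs, 3, mask, c)
		printf("visiting pipe %d\n", c->pipe);	/* 0 and 2 only */
	return 0;
}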
6892void
6893intel_modeset_check_state(struct drm_device *dev)
6894{
6895 struct intel_crtc *crtc;
6896 struct intel_encoder *encoder;
6897 struct intel_connector *connector;
6898
6899 list_for_each_entry(connector, &dev->mode_config.connector_list,
6900 base.head) {
6901 /* This also checks the encoder/connector hw state with the
6902 * ->get_hw_state callbacks. */
6903 intel_connector_check_state(connector);
6904
6905 WARN(&connector->new_encoder->base != connector->base.encoder,
6906 "connector's staged encoder doesn't match current encoder\n");
6907 }
6908
6909 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
6910 base.head) {
6911 bool enabled = false;
6912 bool active = false;
6913 enum pipe pipe, tracked_pipe;
6914
6915 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
6916 encoder->base.base.id,
6917 drm_get_encoder_name(&encoder->base));
6918
6919 WARN(&encoder->new_crtc->base != encoder->base.crtc,
6920		     "encoder's staged crtc doesn't match current crtc\n");
6921 WARN(encoder->connectors_active && !encoder->base.crtc,
6922 "encoder's active_connectors set, but no crtc\n");
6923
6924 list_for_each_entry(connector, &dev->mode_config.connector_list,
6925 base.head) {
6926 if (connector->base.encoder != &encoder->base)
6927 continue;
6928 enabled = true;
6929 if (connector->base.dpms != DRM_MODE_DPMS_OFF)
6930 active = true;
6931 }
6932 WARN(!!encoder->base.crtc != enabled,
6933 "encoder's enabled state mismatch "
6934 "(expected %i, found %i)\n",
6935 !!encoder->base.crtc, enabled);
6936 WARN(active && !encoder->base.crtc,
6937 "active encoder with no crtc\n");
6938
6939 WARN(encoder->connectors_active != active,
6940 "encoder's computed active state doesn't match tracked active state "
6941 "(expected %i, found %i)\n", active, encoder->connectors_active);
6942
6943 active = encoder->get_hw_state(encoder, &pipe);
6944 WARN(active != encoder->connectors_active,
6945 "encoder's hw state doesn't match sw tracking "
6946 "(expected %i, found %i)\n",
6947 encoder->connectors_active, active);
6948
6949 if (!encoder->base.crtc)
6950 continue;
6951
6952 tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
6953 WARN(active && pipe != tracked_pipe,
6954		     "active encoder's pipe doesn't match "
6955 "(expected %i, found %i)\n",
6956 tracked_pipe, pipe);
6957
6958 }
6959
6960 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
6961 base.head) {
6962 bool enabled = false;
6963 bool active = false;
6964
6965 DRM_DEBUG_KMS("[CRTC:%d]\n",
6966 crtc->base.base.id);
6967
6968 WARN(crtc->active && !crtc->base.enabled,
6969 "active crtc, but not enabled in sw tracking\n");
6970
6971 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
6972 base.head) {
6973 if (encoder->base.crtc != &crtc->base)
6974 continue;
6975 enabled = true;
6976 if (encoder->connectors_active)
6977 active = true;
6978 }
6979 WARN(active != crtc->active,
6980 "crtc's computed active state doesn't match tracked active state "
6981 "(expected %i, found %i)\n", active, crtc->active);
6982 WARN(enabled != crtc->base.enabled,
6983 "crtc's computed enabled state doesn't match tracked enabled state "
6984 "(expected %i, found %i)\n", enabled, crtc->base.enabled);
6985
6986 assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
6987 }
6988}
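intel_modeset_check_state cross-checks three views of the same links — staged (new_*), committed software state (base.*), and the hardware readout from ->get_hw_state — and WARNs on any mismatch. A compressed standalone model of one such check (the two-field encoder and its fake hardware readout are invented):

#include <stdio.h>
#include <stdbool.h>

#define WARN(cond, msg) \
	do { if (cond) fprintf(stderr, "WARN: %s", (msg)); } while (0)

struct encoder {
	bool connectors_active;		/* software tracking */
	bool (*get_hw_state)(void);	/* hardware readout */
};

static bool hw_says_off(void) { return false; }

int main(void)
{
	struct encoder enc = {
		.connectors_active = true,	/* sw thinks it is on ... */
		.get_hw_state = hw_says_off,	/* ... hw says otherwise */
	};
	bool active = enc.get_hw_state();

	WARN(active != enc.connectors_active,
	     "encoder's hw state doesn't match sw tracking\n");
	return 0;
}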
6989
6990bool intel_set_mode(struct drm_crtc *crtc,
6991 struct drm_display_mode *mode,
6992 int x, int y, struct drm_framebuffer *fb)
6993{
6994 struct drm_device *dev = crtc->dev;
6995 drm_i915_private_t *dev_priv = dev->dev_private;
6996 struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
6997 struct drm_encoder_helper_funcs *encoder_funcs;
6998 struct drm_encoder *encoder;
6999 struct intel_crtc *intel_crtc;
7000 unsigned disable_pipes, prepare_pipes, modeset_pipes;
7001 bool ret = true;
7002
7003 intel_modeset_affected_pipes(crtc, &modeset_pipes,
7004 &prepare_pipes, &disable_pipes);
7005
7006 DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
7007 modeset_pipes, prepare_pipes, disable_pipes);
7008
7009 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
7010 intel_crtc_disable(&intel_crtc->base);
7011
7012 saved_hwmode = crtc->hwmode;
7013 saved_mode = crtc->mode;
7014
7015 /* Hack: Because we don't (yet) support global modeset on multiple
7016 * crtcs, we don't keep track of the new mode for more than one crtc.
7017 * Hence simply check whether any bit is set in modeset_pipes in all the
7018	 * pieces of code that are not yet converted to deal with multiple crtcs
7019 * changing their mode at the same time. */
7020 adjusted_mode = NULL;
7021 if (modeset_pipes) {
7022 adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
7023 if (IS_ERR(adjusted_mode)) {
7024 return false;
7025 }
7026 }
7027
7028 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
7029 if (intel_crtc->base.enabled)
7030 dev_priv->display.crtc_disable(&intel_crtc->base);
7031 }
7032
7033 /* crtc->mode is already used by the ->mode_set callbacks, hence we need
7034 * to set it here already despite that we pass it down the callchain.
 	 */
-	intel_crtc->dpms_mode = -1;
+	if (modeset_pipes)
+		crtc->mode = *mode;

-	/* We need to fix up any BIOS configuration that conflicts with
-	 * our expectations.
+	/* Only after disabling all output pipelines that will be changed can we
+	 * update the output configuration. */
+	intel_modeset_update_state(dev, prepare_pipes);
+
+	/* Set up the DPLL and any encoder state that needs to adjust or depend
+	 * on the DPLL.
 	 */
-	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
+	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
7047 ret = !intel_crtc_mode_set(&intel_crtc->base,
7048 mode, adjusted_mode,
7049 x, y, fb);
7050 if (!ret)
7051 goto done;
7052
7053 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
7054
7055 if (encoder->crtc != &intel_crtc->base)
7056 continue;
7057
7058 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
7059 encoder->base.id, drm_get_encoder_name(encoder),
7060 mode->base.id, mode->name);
7061 encoder_funcs = encoder->helper_private;
7062 encoder_funcs->mode_set(encoder, mode, adjusted_mode);
7063 }
7064 }
7065
7066 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
7067 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
7068 dev_priv->display.crtc_enable(&intel_crtc->base);
7069
7070 if (modeset_pipes) {
7071 /* Store real post-adjustment hardware mode. */
7072 crtc->hwmode = *adjusted_mode;
7073
7074 /* Calculate and store various constants which
7075 * are later needed by vblank and swap-completion
7076 * timestamping. They are derived from true hwmode.
7077 */
7078 drm_calc_timestamping_constants(crtc);
7079 }
7080
7081 /* FIXME: add subpixel order */
7082done:
7083 drm_mode_destroy(dev, adjusted_mode);
7084 if (!ret && crtc->enabled) {
7085 crtc->hwmode = saved_hwmode;
7086 crtc->mode = saved_mode;
7087 } else {
7088 intel_modeset_check_state(dev);
7089 }
7090
7091 return ret;
}
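intel_set_mode is transactional about the software mode: it snapshots crtc->mode/hwmode up front and restores them when the hardware programming fails, so a failed modeset leaves the tracked state untouched. The same save-then-rollback shape in a standalone sketch (all types and the failing program_hw() are invented):

#include <stdio.h>
#include <stdbool.h>

struct mode { int clock; };
struct crtc { struct mode mode; bool enabled; };

/* pretend hardware programming that rejects a zero clock */
static bool program_hw(struct crtc *c, const struct mode *m)
{
	return c->enabled && m->clock > 0;
}

static bool set_mode(struct crtc *c, const struct mode *m)
{
	struct mode saved_mode = c->mode;
	bool ret;

	c->mode = *m;			/* callbacks expect the new mode */
	ret = program_hw(c, m);
	if (!ret && c->enabled)
		c->mode = saved_mode;	/* roll back on failure */
	return ret;
}

int main(void)
{
	struct crtc c = { { 65000 }, true };
	struct mode bad = { 0 };

	printf("set_mode: %s, clock still %d\n",
	       set_mode(&c, &bad) ? "ok" : "failed", c.mode.clock);
	return 0;
}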

-static struct drm_crtc_helper_funcs intel_helper_funcs = {
-	.dpms = intel_crtc_dpms,
-	.mode_fixup = intel_crtc_mode_fixup,
-	.mode_set = intel_crtc_mode_set,
-	.mode_set_base = intel_pipe_set_base,
-	.mode_set_base_atomic = intel_pipe_set_base_atomic,
-	.load_lut = intel_crtc_load_lut,
-	.disable = intel_crtc_disable,
-};
+#undef for_each_intel_crtc_masked
+
+static void intel_set_config_free(struct intel_set_config *config)
+{
+	if (!config)
+		return;
+
+	kfree(config->save_connector_encoders);
+	kfree(config->save_encoder_crtcs);
+	kfree(config);
+}
7105
7106static int intel_set_config_save_state(struct drm_device *dev,
7107 struct intel_set_config *config)
7108{
7109 struct drm_encoder *encoder;
7110 struct drm_connector *connector;
7111 int count;
7112
7113 config->save_encoder_crtcs =
7114 kcalloc(dev->mode_config.num_encoder,
7115 sizeof(struct drm_crtc *), GFP_KERNEL);
7116 if (!config->save_encoder_crtcs)
7117 return -ENOMEM;
7118
7119 config->save_connector_encoders =
7120 kcalloc(dev->mode_config.num_connector,
7121 sizeof(struct drm_encoder *), GFP_KERNEL);
7122 if (!config->save_connector_encoders)
7123 return -ENOMEM;
7124
7125 /* Copy data. Note that driver private data is not affected.
7126 * Should anything bad happen only the expected state is
7127 * restored, not the drivers personal bookkeeping.
7128 */
7129 count = 0;
7130 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
7131 config->save_encoder_crtcs[count++] = encoder->crtc;
7132 }
7133
7134 count = 0;
7135 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
7136 config->save_connector_encoders[count++] = connector->encoder;
7137 }
7138
7139 return 0;
7140}
7141
7142static void intel_set_config_restore_state(struct drm_device *dev,
7143 struct intel_set_config *config)
7144{
7145 struct intel_encoder *encoder;
7146 struct intel_connector *connector;
7147 int count;
7148
7149 count = 0;
7150 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7151 encoder->new_crtc =
7152 to_intel_crtc(config->save_encoder_crtcs[count++]);
7153 }
7154
7155 count = 0;
7156 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
7157 connector->new_encoder =
7158 to_intel_encoder(config->save_connector_encoders[count++]);
7159 }
7160}
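The save/restore pair above snapshots the encoder->crtc and connector->encoder pointers into two flat kcalloc'd arrays so a failed set_config can be unwound. A standalone miniature of that snapshot-and-rollback (fixed-size arrays stand in for the allocated ones, and the types are invented):

#include <stdio.h>

struct crtc { int id; };
struct encoder { struct crtc *crtc; };

int main(void)
{
	struct crtc a = { 0 }, b = { 1 };
	struct encoder encoders[2] = { { &a }, { &b } };
	struct crtc *saved_encoder_crtcs[2];
	int i;

	for (i = 0; i < 2; i++)		/* save_state */
		saved_encoder_crtcs[i] = encoders[i].crtc;

	encoders[0].crtc = &b;		/* speculative reconfiguration */

	for (i = 0; i < 2; i++)		/* restore_state after a failure */
		encoders[i].crtc = saved_encoder_crtcs[i];

	printf("encoder 0 back on crtc %d\n", encoders[0].crtc->id);
	return 0;
}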
7161
7162static void
7163intel_set_config_compute_mode_changes(struct drm_mode_set *set,
7164 struct intel_set_config *config)
7165{
7166
7167 /* We should be able to check here if the fb has the same properties
7168 * and then just flip_or_move it */
7169 if (set->crtc->fb != set->fb) {
7170 /* If we have no fb then treat it as a full mode set */
7171 if (set->crtc->fb == NULL) {
7172 DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
7173 config->mode_changed = true;
7174 } else if (set->fb == NULL) {
7175 config->mode_changed = true;
7176 } else if (set->fb->depth != set->crtc->fb->depth) {
7177 config->mode_changed = true;
7178 } else if (set->fb->bits_per_pixel !=
7179 set->crtc->fb->bits_per_pixel) {
7180 config->mode_changed = true;
7181 } else
7182 config->fb_changed = true;
7183 }
7184
7185 if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
7186 config->fb_changed = true;
7187
7188 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
7189 DRM_DEBUG_KMS("modes are different, full mode set\n");
7190 drm_mode_debug_printmodeline(&set->crtc->mode);
7191 drm_mode_debug_printmodeline(set->mode);
7192 config->mode_changed = true;
7193 }
7194}
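The helper above classifies a set_config request as either a full modeset or a mere fb/base update. Its decision tree, reduced to a standalone predicate (the two-field fb is an invented stand-in for struct drm_framebuffer):

#include <stdio.h>
#include <stdbool.h>

struct fb { int depth, bits_per_pixel; };

static bool needs_full_modeset(const struct fb *cur, const struct fb *req)
{
	if (!cur || !req)			/* gaining or losing an fb */
		return true;
	if (req->depth != cur->depth)		/* format changed */
		return true;
	if (req->bits_per_pixel != cur->bits_per_pixel)
		return true;
	return false;				/* plain fb flip or move */
}

int main(void)
{
	struct fb cur = { 24, 32 }, req = { 24, 32 };

	printf("full modeset: %s\n",
	       needs_full_modeset(&cur, &req) ? "yes" : "no, fb_changed only");
	return 0;
}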
7195
7196static int
7197intel_modeset_stage_output_state(struct drm_device *dev,
7198 struct drm_mode_set *set,
7199 struct intel_set_config *config)
7200{
7201 struct drm_crtc *new_crtc;
7202 struct intel_connector *connector;
7203 struct intel_encoder *encoder;
7204 int count, ro;
7205
7206	/* The upper layers ensure that we either disable a crtc or have a list
7207 * of connectors. For paranoia, double-check this. */
7208 WARN_ON(!set->fb && (set->num_connectors != 0));
7209 WARN_ON(set->fb && (set->num_connectors == 0));
7210
7211 count = 0;
7212 list_for_each_entry(connector, &dev->mode_config.connector_list,
7213 base.head) {
7214 /* Otherwise traverse passed in connector list and get encoders
7215 * for them. */
7216 for (ro = 0; ro < set->num_connectors; ro++) {
7217 if (set->connectors[ro] == &connector->base) {
7218 connector->new_encoder = connector->encoder;
7219 break;
7220 }
7221 }
7222
7223 /* If we disable the crtc, disable all its connectors. Also, if
7224 * the connector is on the changing crtc but not on the new
7225 * connector list, disable it. */
7226 if ((!set->fb || ro == set->num_connectors) &&
7227 connector->base.encoder &&
7228 connector->base.encoder->crtc == set->crtc) {
7229 connector->new_encoder = NULL;
7230
7231 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
7232 connector->base.base.id,
7233 drm_get_connector_name(&connector->base));
7234 }
7235
7236
7237 if (&connector->new_encoder->base != connector->base.encoder) {
7238 DRM_DEBUG_KMS("encoder changed, full mode switch\n");
7239 config->mode_changed = true;
7240 }
7241
7242 /* Disable all disconnected encoders. */
7243 if (connector->base.status == connector_status_disconnected)
7244 connector->new_encoder = NULL;
7245 }
7246 /* connector->new_encoder is now updated for all connectors. */
7247
7248 /* Update crtc of enabled connectors. */
7249 count = 0;
7250 list_for_each_entry(connector, &dev->mode_config.connector_list,
7251 base.head) {
7252 if (!connector->new_encoder)
7253 continue;
7254
7255 new_crtc = connector->new_encoder->base.crtc;
7256
7257 for (ro = 0; ro < set->num_connectors; ro++) {
7258 if (set->connectors[ro] == &connector->base)
7259 new_crtc = set->crtc;
7260 }
7261
7262 /* Make sure the new CRTC will work with the encoder */
7263 if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
7264 new_crtc)) {
7265 return -EINVAL;
7266 }
7267 connector->encoder->new_crtc = to_intel_crtc(new_crtc);
7268
7269 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
7270 connector->base.base.id,
7271 drm_get_connector_name(&connector->base),
7272 new_crtc->base.id);
7273 }
7274
7275	/* Check for any encoders that need to be disabled. */
7276 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7277 base.head) {
7278 list_for_each_entry(connector,
7279 &dev->mode_config.connector_list,
7280 base.head) {
7281 if (connector->new_encoder == encoder) {
7282 WARN_ON(!connector->new_encoder->new_crtc);
7283
7284 goto next_encoder;
7285 }
7286 }
7287 encoder->new_crtc = NULL;
7288next_encoder:
7289 /* Only now check for crtc changes so we don't miss encoders
7290 * that will be disabled. */
7291 if (&encoder->new_crtc->base != encoder->base.crtc) {
7292 DRM_DEBUG_KMS("crtc changed, full mode switch\n");
7293 config->mode_changed = true;
7294 }
7295 }
7296 /* Now we've also updated encoder->new_crtc for all encoders. */
7297
7298 return 0;
7299}
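Staging happens in two passes: connector->new_encoder is settled first, then each encoder keeps its new_crtc only if some staged connector still references it. A toy version of that second pass (the data layout is invented for illustration):

#include <stdio.h>
#include <stddef.h>

struct crtc { int id; };
struct encoder { struct crtc *new_crtc; };
struct connector { struct encoder *new_encoder; };

int main(void)
{
	struct crtc crtc0 = { 0 };
	struct encoder enc = { &crtc0 };
	struct connector connectors[2] = { { NULL }, { NULL } };
	int i, used = 0;

	/* pass 2: keep encoder->new_crtc only if a staged connector uses it */
	for (i = 0; i < 2; i++)
		if (connectors[i].new_encoder == &enc)
			used = 1;

	if (!used)
		enc.new_crtc = NULL;	/* encoder is being disabled */

	printf("encoder %s\n", enc.new_crtc ? "in use" : "disabled");
	return 0;
}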
7300
7301static int intel_crtc_set_config(struct drm_mode_set *set)
7302{
7303 struct drm_device *dev;
7304 struct drm_mode_set save_set;
7305 struct intel_set_config *config;
7306 int ret;
7307
7308 BUG_ON(!set);
7309 BUG_ON(!set->crtc);
7310 BUG_ON(!set->crtc->helper_private);
7311
7312 if (!set->mode)
7313 set->fb = NULL;
7314
7315 /* The fb helper likes to play gross jokes with ->mode_set_config.
7316 * Unfortunately the crtc helper doesn't do much at all for this case,
7317 * so we have to cope with this madness until the fb helper is fixed up. */
7318 if (set->fb && set->num_connectors == 0)
7319 return 0;
7320
7321 if (set->fb) {
7322 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
7323 set->crtc->base.id, set->fb->base.id,
7324 (int)set->num_connectors, set->x, set->y);
7325 } else {
7326 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
7327 }
7328
7329 dev = set->crtc->dev;
7330
7331 ret = -ENOMEM;
7332 config = kzalloc(sizeof(*config), GFP_KERNEL);
7333 if (!config)
7334 goto out_config;
7335
7336 ret = intel_set_config_save_state(dev, config);
7337 if (ret)
7338 goto out_config;
7339
7340 save_set.crtc = set->crtc;
7341 save_set.mode = &set->crtc->mode;
7342 save_set.x = set->crtc->x;
7343 save_set.y = set->crtc->y;
7344 save_set.fb = set->crtc->fb;
7345
7346 /* Compute whether we need a full modeset, only an fb base update or no
7347 * change at all. In the future we might also check whether only the
7348 * mode changed, e.g. for LVDS where we only change the panel fitter in
7349 * such cases. */
7350 intel_set_config_compute_mode_changes(set, config);
7351
7352 ret = intel_modeset_stage_output_state(dev, set, config);
7353 if (ret)
7354 goto fail;
7355
7356 if (config->mode_changed) {
7357 if (set->mode) {
7358 DRM_DEBUG_KMS("attempting to set mode from"
7359 " userspace\n");
7360 drm_mode_debug_printmodeline(set->mode);
7361 }
7362
7363 if (!intel_set_mode(set->crtc, set->mode,
7364 set->x, set->y, set->fb)) {
7365 DRM_ERROR("failed to set mode on [CRTC:%d]\n",
7366 set->crtc->base.id);
7367 ret = -EINVAL;
7368 goto fail;
7369 }
7370 } else if (config->fb_changed) {
7371 ret = intel_pipe_set_base(set->crtc,
7372 set->x, set->y, set->fb);
7373 }
7374
7375 intel_set_config_free(config);
7376
7377 return 0;
7378
7379fail:
7380 intel_set_config_restore_state(dev, config);
7381
7382 /* Try to restore the config */
7383 if (config->mode_changed &&
7384 !intel_set_mode(save_set.crtc, save_set.mode,
7385 save_set.x, save_set.y, save_set.fb))
7386 DRM_ERROR("failed to restore config after modeset failure\n");
7387
7388out_config:
7389 intel_set_config_free(config);
7390 return ret;
7391}

 static const struct drm_crtc_funcs intel_crtc_funcs = {
-	.reset = intel_crtc_reset,
 	.cursor_set = intel_crtc_cursor_set,
 	.cursor_move = intel_crtc_cursor_move,
 	.gamma_set = intel_crtc_gamma_set,
-	.set_config = drm_crtc_helper_set_config,
+	.set_config = intel_crtc_set_config,
 	.destroy = intel_crtc_destroy,
 	.page_flip = intel_crtc_page_flip,
 };
@@ -6655,24 +7448,9 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

-	intel_crtc_reset(&intel_crtc->base);
-	intel_crtc->active = true; /* force the pipe off on setup_init_config */
 	intel_crtc->bpp = 24; /* default for pre-Ironlake */

-	if (HAS_PCH_SPLIT(dev)) {
-		intel_helper_funcs.prepare = ironlake_crtc_prepare;
-		intel_helper_funcs.commit = ironlake_crtc_commit;
-	} else {
-		intel_helper_funcs.prepare = i9xx_crtc_prepare;
-		intel_helper_funcs.commit = i9xx_crtc_commit;
-	}
-
 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
-
-	intel_crtc->busy = false;
-
-	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
-		    (unsigned long)intel_crtc);
 }

 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -6699,15 +7477,23 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 	return 0;
 }

-static int intel_encoder_clones(struct drm_device *dev, int type_mask)
+static int intel_encoder_clones(struct intel_encoder *encoder)
 {
-	struct intel_encoder *encoder;
+	struct drm_device *dev = encoder->base.dev;
+	struct intel_encoder *source_encoder;
 	int index_mask = 0;
 	int entry = 0;

-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
-		if (type_mask & encoder->clone_mask)
+	list_for_each_entry(source_encoder,
+			    &dev->mode_config.encoder_list, base.head) {
+
+		if (encoder == source_encoder)
 			index_mask |= (1 << entry);
+
+		/* Intel hw has only one MUX where encoders could be cloned. */
+		if (encoder->cloneable && source_encoder->cloneable)
+			index_mask |= (1 << entry);
+
 		entry++;
 	}

@@ -6748,10 +7534,10 @@ static void intel_setup_outputs(struct drm_device *dev)
 	dpd_is_edp = intel_dpd_is_edp(dev);

 	if (has_edp_a(dev))
-		intel_dp_init(dev, DP_A);
+		intel_dp_init(dev, DP_A, PORT_A);

 	if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
-		intel_dp_init(dev, PCH_DP_D);
+		intel_dp_init(dev, PCH_DP_D, PORT_D);
 	}

 	intel_crt_init(dev);
@@ -6782,22 +7568,22 @@ static void intel_setup_outputs(struct drm_device *dev)
 		/* PCH SDVOB multiplex with HDMIB */
 		found = intel_sdvo_init(dev, PCH_SDVOB, true);
 		if (!found)
-			intel_hdmi_init(dev, HDMIB);
+			intel_hdmi_init(dev, HDMIB, PORT_B);
 		if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
-			intel_dp_init(dev, PCH_DP_B);
+			intel_dp_init(dev, PCH_DP_B, PORT_B);
 	}

 	if (I915_READ(HDMIC) & PORT_DETECTED)
-		intel_hdmi_init(dev, HDMIC);
+		intel_hdmi_init(dev, HDMIC, PORT_C);

 	if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
-		intel_hdmi_init(dev, HDMID);
+		intel_hdmi_init(dev, HDMID, PORT_D);

 	if (I915_READ(PCH_DP_C) & DP_DETECTED)
-		intel_dp_init(dev, PCH_DP_C);
+		intel_dp_init(dev, PCH_DP_C, PORT_C);

 	if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
-		intel_dp_init(dev, PCH_DP_D);
+		intel_dp_init(dev, PCH_DP_D, PORT_D);
 	} else if (IS_VALLEYVIEW(dev)) {
 		int found;

@@ -6805,17 +7591,17 @@ static void intel_setup_outputs(struct drm_device *dev)
 		/* SDVOB multiplex with HDMIB */
 		found = intel_sdvo_init(dev, SDVOB, true);
 		if (!found)
-			intel_hdmi_init(dev, SDVOB);
+			intel_hdmi_init(dev, SDVOB, PORT_B);
 		if (!found && (I915_READ(DP_B) & DP_DETECTED))
-			intel_dp_init(dev, DP_B);
+			intel_dp_init(dev, DP_B, PORT_B);
 	}

 	if (I915_READ(SDVOC) & PORT_DETECTED)
-		intel_hdmi_init(dev, SDVOC);
+		intel_hdmi_init(dev, SDVOC, PORT_C);

 	/* Shares lanes with HDMI on SDVOC */
 	if (I915_READ(DP_C) & DP_DETECTED)
-		intel_dp_init(dev, DP_C);
+		intel_dp_init(dev, DP_C, PORT_C);
 	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
 		bool found = false;

@@ -6824,12 +7610,12 @@ static void intel_setup_outputs(struct drm_device *dev)
 		found = intel_sdvo_init(dev, SDVOB, true);
 		if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
 			DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
-			intel_hdmi_init(dev, SDVOB);
+			intel_hdmi_init(dev, SDVOB, PORT_B);
 		}

 		if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
 			DRM_DEBUG_KMS("probing DP_B\n");
-			intel_dp_init(dev, DP_B);
+			intel_dp_init(dev, DP_B, PORT_B);
 		}
 	}

@@ -6844,18 +7630,18 @@ static void intel_setup_outputs(struct drm_device *dev)

 		if (SUPPORTS_INTEGRATED_HDMI(dev)) {
 			DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
-			intel_hdmi_init(dev, SDVOC);
+			intel_hdmi_init(dev, SDVOC, PORT_C);
 		}
 		if (SUPPORTS_INTEGRATED_DP(dev)) {
 			DRM_DEBUG_KMS("probing DP_C\n");
-			intel_dp_init(dev, DP_C);
+			intel_dp_init(dev, DP_C, PORT_C);
 		}
 	}

 	if (SUPPORTS_INTEGRATED_DP(dev) &&
 	    (I915_READ(DP_D) & DP_DETECTED)) {
 		DRM_DEBUG_KMS("probing DP_D\n");
-		intel_dp_init(dev, DP_D);
+		intel_dp_init(dev, DP_D, PORT_D);
 	}
 	} else if (IS_GEN2(dev))
 		intel_dvo_init(dev);
@@ -6866,12 +7652,9 @@ static void intel_setup_outputs(struct drm_device *dev)
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
 		encoder->base.possible_crtcs = encoder->crtc_mask;
 		encoder->base.possible_clones =
-			intel_encoder_clones(dev, encoder->clone_mask);
+			intel_encoder_clones(encoder);
 	}

-	/* disable all the possible outputs/crtcs before entering KMS mode */
-	drm_helper_disable_unused_functions(dev);
-
 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 		ironlake_init_pch_refclk(dev);
 }
@@ -6973,13 +7756,15 @@ static void intel_init_display(struct drm_device *dev)

 	/* We always want a DPMS function */
 	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->display.dpms = ironlake_crtc_dpms;
 		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+		dev_priv->display.crtc_enable = ironlake_crtc_enable;
+		dev_priv->display.crtc_disable = ironlake_crtc_disable;
 		dev_priv->display.off = ironlake_crtc_off;
 		dev_priv->display.update_plane = ironlake_update_plane;
 	} else {
-		dev_priv->display.dpms = i9xx_crtc_dpms;
 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+		dev_priv->display.crtc_enable = i9xx_crtc_enable;
+		dev_priv->display.crtc_disable = i9xx_crtc_disable;
 		dev_priv->display.off = i9xx_crtc_off;
 		dev_priv->display.update_plane = i9xx_update_plane;
 	}
@@ -7023,7 +7808,7 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.write_eld = ironlake_write_eld;
 	} else if (IS_HASWELL(dev)) {
 		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
-		dev_priv->display.write_eld = ironlake_write_eld;
+		dev_priv->display.write_eld = haswell_write_eld;
 	} else
 		dev_priv->display.update_wm = NULL;
 	} else if (IS_G4X(dev)) {
@@ -7101,21 +7886,16 @@ static struct intel_quirk intel_quirks[] = {
 	/* HP Mini needs pipe A force quirk (LP: #322104) */
 	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

-	/* Thinkpad R31 needs pipe A force quirk */
-	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
 	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
 	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

-	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
-	{ 0x3577, 0x1014, 0x0513, quirk_pipea_force },
-	/* ThinkPad X40 needs pipe A force quirk */
-
 	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
 	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

 	/* 855 & before need to leave pipe A & dpll A up */
 	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

 	/* Lenovo U160 cannot use SSC on LVDS */
 	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
@@ -7231,10 +8011,251 @@ void intel_modeset_init(struct drm_device *dev)
 	/* Just disable it once at startup */
 	i915_disable_vga(dev);
 	intel_setup_outputs(dev);
+}

-	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
-	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
-		    (unsigned long)dev);
+static void
+intel_connector_break_all_links(struct intel_connector *connector)
+{
8019 connector->base.dpms = DRM_MODE_DPMS_OFF;
8020 connector->base.encoder = NULL;
8021 connector->encoder->connectors_active = false;
8022 connector->encoder->base.crtc = NULL;
8023}
8024
8025static void intel_enable_pipe_a(struct drm_device *dev)
8026{
8027 struct intel_connector *connector;
8028 struct drm_connector *crt = NULL;
8029 struct intel_load_detect_pipe load_detect_temp;
8030
8031	/* We can't just switch on pipe A; we need to set things up with a
8032 * proper mode and output configuration. As a gross hack, enable pipe A
8033 * by enabling the load detect pipe once. */
8034 list_for_each_entry(connector,
8035 &dev->mode_config.connector_list,
8036 base.head) {
8037 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
8038 crt = &connector->base;
8039 break;
8040 }
8041 }
8042
8043 if (!crt)
8044 return;
8045
8046 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
8047 intel_release_load_detect_pipe(crt, &load_detect_temp);
8048
8049
8050}
8051
8052static void intel_sanitize_crtc(struct intel_crtc *crtc)
8053{
8054 struct drm_device *dev = crtc->base.dev;
8055 struct drm_i915_private *dev_priv = dev->dev_private;
8056 u32 reg, val;
8057
8058 /* Clear any frame start delays used for debugging left by the BIOS */
8059 reg = PIPECONF(crtc->pipe);
8060 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
8061
8062 /* We need to sanitize the plane -> pipe mapping first because this will
8063 * disable the crtc (and hence change the state) if it is wrong. */
8064 if (!HAS_PCH_SPLIT(dev)) {
8065 struct intel_connector *connector;
8066 bool plane;
8067
8068 reg = DSPCNTR(crtc->plane);
8069 val = I915_READ(reg);
8070
8071 if ((val & DISPLAY_PLANE_ENABLE) == 0 &&
8072 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
8073 goto ok;
8074
8075 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
8076 crtc->base.base.id);
8077
8078 /* Pipe has the wrong plane attached and the plane is active.
8079 * Temporarily change the plane mapping and disable everything
8080 * ... */
8081 plane = crtc->plane;
8082 crtc->plane = !plane;
8083 dev_priv->display.crtc_disable(&crtc->base);
8084 crtc->plane = plane;
8085
8086 /* ... and break all links. */
8087 list_for_each_entry(connector, &dev->mode_config.connector_list,
8088 base.head) {
8089 if (connector->encoder->base.crtc != &crtc->base)
8090 continue;
8091
8092 intel_connector_break_all_links(connector);
8093 }
8094
8095 WARN_ON(crtc->active);
8096 crtc->base.enabled = false;
8097 }
8098ok:
8099
8100 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
8101 crtc->pipe == PIPE_A && !crtc->active) {
8102 /* BIOS forgot to enable pipe A, this mostly happens after
8103		 * resume. Force-enable the pipe to fix this; the update_dpms
8104		 * call below will restore the pipe to the right state, but leave
8105 * the required bits on. */
8106 intel_enable_pipe_a(dev);
8107 }
8108
8109 /* Adjust the state of the output pipe according to whether we
8110 * have active connectors/encoders. */
8111 intel_crtc_update_dpms(&crtc->base);
8112
8113 if (crtc->active != crtc->base.enabled) {
8114 struct intel_encoder *encoder;
8115
8116 /* This can happen either due to bugs in the get_hw_state
8117 * functions or because the pipe is force-enabled due to the
8118 * pipe A quirk. */
8119 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
8120 crtc->base.base.id,
8121 crtc->base.enabled ? "enabled" : "disabled",
8122 crtc->active ? "enabled" : "disabled");
8123
8124 crtc->base.enabled = crtc->active;
8125
8126		/* Because we only establish the connector -> encoder ->
8127		 * crtc links if something is active, the crtc is now
8128		 * deactivated. Break the links. Connector -> encoder
8129		 * links are only established when things are actually
8130		 * up, hence no need to break them. */
8131 WARN_ON(crtc->active);
8132
8133 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
8134 WARN_ON(encoder->connectors_active);
8135 encoder->base.crtc = NULL;
8136 }
8137 }
8138}
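The plane-to-pipe sanity check near the top of intel_sanitize_crtc reads DSPCNTR and compares the plane's pipe-select bits against the pipe the driver expects. A standalone sketch of that register-bitfield test (the register value and the single-bit pipe-select layout are simplified stand-ins, not the real DSPCNTR encoding):

#include <stdio.h>
#include <stdint.h>

#define PLANE_ENABLE	(1u << 31)
#define SEL_PIPE_MASK	(1u << 24)	/* pretend: 0 = pipe A, 1 = pipe B */

int main(void)
{
	uint32_t val = PLANE_ENABLE | SEL_PIPE_MASK;	/* enabled, on pipe B */
	int expected_pipe = 0;				/* driver wanted pipe A */

	/* !! collapses the masked bits to 0/1 before the pipe comparison */
	if ((val & PLANE_ENABLE) &&
	    (int)!!(val & SEL_PIPE_MASK) != expected_pipe)
		printf("wrong plane connection detected!\n");
	return 0;
}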
8139
8140static void intel_sanitize_encoder(struct intel_encoder *encoder)
8141{
8142 struct intel_connector *connector;
8143 struct drm_device *dev = encoder->base.dev;
8144
8145 /* We need to check both for a crtc link (meaning that the
8146 * encoder is active and trying to read from a pipe) and the
8147 * pipe itself being active. */
8148 bool has_active_crtc = encoder->base.crtc &&
8149 to_intel_crtc(encoder->base.crtc)->active;
8150
8151 if (encoder->connectors_active && !has_active_crtc) {
8152 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
8153 encoder->base.base.id,
8154 drm_get_encoder_name(&encoder->base));
8155
8156 /* Connector is active, but has no active pipe. This is
8157 * fallout from our resume register restoring. Disable
8158 * the encoder manually again. */
8159 if (encoder->base.crtc) {
8160 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
8161 encoder->base.base.id,
8162 drm_get_encoder_name(&encoder->base));
8163 encoder->disable(encoder);
8164 }
8165
8166 /* Inconsistent output/port/pipe state happens presumably due to
8167 * a bug in one of the get_hw_state functions. Or someplace else
8168 * in our code, like the register restore mess on resume. Clamp
8169 * things to off as a safer default. */
8170 list_for_each_entry(connector,
8171 &dev->mode_config.connector_list,
8172 base.head) {
8173 if (connector->encoder != encoder)
8174 continue;
8175
8176 intel_connector_break_all_links(connector);
8177 }
8178 }
8179 /* Enabled encoders without active connectors will be fixed in
8180 * the crtc fixup. */
8181}
8182
8183/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
8184 * and i915 state tracking structures. */
8185void intel_modeset_setup_hw_state(struct drm_device *dev)
8186{
8187 struct drm_i915_private *dev_priv = dev->dev_private;
8188 enum pipe pipe;
8189 u32 tmp;
8190 struct intel_crtc *crtc;
8191 struct intel_encoder *encoder;
8192 struct intel_connector *connector;
8193
8194 for_each_pipe(pipe) {
8195 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
8196
8197 tmp = I915_READ(PIPECONF(pipe));
8198 if (tmp & PIPECONF_ENABLE)
8199 crtc->active = true;
8200 else
8201 crtc->active = false;
8202
8203 crtc->base.enabled = crtc->active;
8204
8205 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
8206 crtc->base.base.id,
8207 crtc->active ? "enabled" : "disabled");
8208 }
8209
8210 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8211 base.head) {
8212 pipe = 0;
8213
8214 if (encoder->get_hw_state(encoder, &pipe)) {
8215 encoder->base.crtc =
8216 dev_priv->pipe_to_crtc_mapping[pipe];
8217 } else {
8218 encoder->base.crtc = NULL;
8219 }
8220
8221 encoder->connectors_active = false;
8222 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
8223 encoder->base.base.id,
8224 drm_get_encoder_name(&encoder->base),
8225 encoder->base.crtc ? "enabled" : "disabled",
8226 pipe);
8227 }
8228
8229 list_for_each_entry(connector, &dev->mode_config.connector_list,
8230 base.head) {
8231 if (connector->get_hw_state(connector)) {
8232 connector->base.dpms = DRM_MODE_DPMS_ON;
8233 connector->encoder->connectors_active = true;
8234 connector->base.encoder = &connector->encoder->base;
8235 } else {
8236 connector->base.dpms = DRM_MODE_DPMS_OFF;
8237 connector->base.encoder = NULL;
8238 }
8239 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
8240 connector->base.base.id,
8241 drm_get_connector_name(&connector->base),
8242 connector->base.encoder ? "enabled" : "disabled");
8243 }
8244
8245 /* HW state is read out, now we need to sanitize this mess. */
8246 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8247 base.head) {
8248 intel_sanitize_encoder(encoder);
8249 }
8250
8251 for_each_pipe(pipe) {
8252 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
8253 intel_sanitize_crtc(crtc);
8254 }
8255
8256 intel_modeset_update_staged_output_state(dev);
8257
8258 intel_modeset_check_state(dev);
 }

 void intel_modeset_gem_init(struct drm_device *dev)
@@ -7242,6 +8263,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
 	intel_modeset_init_hw(dev);

 	intel_setup_overlay(dev);
+
+	intel_modeset_setup_hw_state(dev);
 }

 void intel_modeset_cleanup(struct drm_device *dev)
@@ -7280,19 +8303,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	 * enqueue unpin/hotplug work. */
 	drm_irq_uninstall(dev);
 	cancel_work_sync(&dev_priv->hotplug_work);
-	cancel_work_sync(&dev_priv->rps_work);
+	cancel_work_sync(&dev_priv->rps.work);

 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();

-	/* Shut off idle work before the crtcs get freed. */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		intel_crtc = to_intel_crtc(crtc);
-		del_timer_sync(&intel_crtc->idle_timer);
-	}
-	del_timer_sync(&dev_priv->idle_timer);
-	cancel_work_sync(&dev_priv->idle_work);
-
 	drm_mode_config_cleanup(dev);
 }

@@ -7338,7 +8353,7 @@ struct intel_display_error_state {
 		u32 position;
 		u32 base;
 		u32 size;
-	} cursor[2];
+	} cursor[I915_MAX_PIPES];

 	struct intel_pipe_error_state {
 		u32 conf;
@@ -7350,7 +8365,7 @@ struct intel_display_error_state {
 		u32 vtotal;
 		u32 vblank;
 		u32 vsync;
-	} pipe[2];
+	} pipe[I915_MAX_PIPES];

 	struct intel_plane_error_state {
 		u32 control;
@@ -7360,7 +8375,7 @@ struct intel_display_error_state {
 		u32 addr;
 		u32 surface;
 		u32 tile_offset;
-	} plane[2];
+	} plane[I915_MAX_PIPES];
 };

 struct intel_display_error_state *
@@ -7374,7 +8389,7 @@ intel_display_capture_error_state(struct drm_device *dev)
 	if (error == NULL)
 		return NULL;

-	for (i = 0; i < 2; i++) {
+	for_each_pipe(i) {
 		error->cursor[i].control = I915_READ(CURCNTR(i));
 		error->cursor[i].position = I915_READ(CURPOS(i));
 		error->cursor[i].base = I915_READ(CURBASE(i));
@@ -7407,9 +8422,11 @@ intel_display_print_error_state(struct seq_file *m,
 			       struct drm_device *dev,
 			       struct intel_display_error_state *error)
 {
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;

-	for (i = 0; i < 2; i++) {
+	seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
+	for_each_pipe(i) {
 		seq_printf(m, "Pipe [%d]:\n", i);
 		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
 		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f1bd4f4cd667..6c8746c030c7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -35,42 +35,10 @@
 #include "intel_drv.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
-#include <drm/drm_dp_helper.h>

-#define DP_RECEIVER_CAP_SIZE	0xf
 #define DP_LINK_STATUS_SIZE	6
 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

-#define DP_LINK_CONFIGURATION_SIZE	9
-
-struct intel_dp {
-	struct intel_encoder base;
-	uint32_t output_reg;
-	uint32_t DP;
-	uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
-	bool has_audio;
-	enum hdmi_force_audio force_audio;
-	uint32_t color_range;
-	int dpms_mode;
-	uint8_t link_bw;
-	uint8_t lane_count;
-	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
-	struct i2c_adapter adapter;
-	struct i2c_algo_dp_aux_data algo;
-	bool is_pch_edp;
-	uint8_t train_set[4];
-	int panel_power_up_delay;
-	int panel_power_down_delay;
-	int panel_power_cycle_delay;
-	int backlight_on_delay;
-	int backlight_off_delay;
-	struct drm_display_mode *panel_fixed_mode; /* for eDP */
-	struct delayed_work panel_vdd_work;
-	bool want_panel_vdd;
-	struct edid *edid; /* cached EDID for eDP */
-	int edid_mode_count;
-};
-
 /**
  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  * @intel_dp: DP struct
@@ -839,9 +807,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	}
 }

-static void ironlake_edp_pll_on(struct drm_encoder *encoder);
-static void ironlake_edp_pll_off(struct drm_encoder *encoder);
-
 static void
 intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		  struct drm_display_mode *adjusted_mode)
@@ -852,14 +817,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

-	/* Turn on the eDP PLL if needed */
-	if (is_edp(intel_dp)) {
-		if (!is_pch_edp(intel_dp))
-			ironlake_edp_pll_on(encoder);
-		else
-			ironlake_edp_pll_off(encoder);
-	}
-
 	/*
 	 * There are four kinds of DP registers:
 	 *
@@ -881,10 +838,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	 * supposed to be read-only.
 	 */
 	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
-	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

 	/* Handle DP bits in common between all three register formats */
-
 	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

 	switch (intel_dp->lane_count) {
@@ -931,7 +886,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		intel_dp->DP |= intel_crtc->pipe << 29;

 		/* don't miss out required setting for eDP */
-		intel_dp->DP |= DP_PLL_ENABLE;
 		if (adjusted_mode->clock < 200000)
 			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
 		else
@@ -953,7 +907,6 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,

 	if (is_cpu_edp(intel_dp)) {
 		/* don't miss out required setting for eDP */
-		intel_dp->DP |= DP_PLL_ENABLE;
 		if (adjusted_mode->clock < 200000)
 			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
 		else
@@ -1224,27 +1177,49 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
 	msleep(intel_dp->backlight_off_delay);
 }

-static void ironlake_edp_pll_on(struct drm_encoder *encoder)
+static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpa_ctl;

+	assert_pipe_disabled(dev_priv,
+			     to_intel_crtc(crtc)->pipe);
+
 	DRM_DEBUG_KMS("\n");
 	dpa_ctl = I915_READ(DP_A);
-	dpa_ctl |= DP_PLL_ENABLE;
-	I915_WRITE(DP_A, dpa_ctl);
+	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
+	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+	/* We don't adjust intel_dp->DP while tearing down the link, to
+	 * facilitate link retraining (e.g. after hotplug). Hence clear all
+	 * enable bits here to ensure that we don't enable too much. */
+	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+	intel_dp->DP |= DP_PLL_ENABLE;
+	I915_WRITE(DP_A, intel_dp->DP);
 	POSTING_READ(DP_A);
 	udelay(200);
 }

-static void ironlake_edp_pll_off(struct drm_encoder *encoder)
+static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpa_ctl;

+	assert_pipe_disabled(dev_priv,
+			     to_intel_crtc(crtc)->pipe);
+
 	dpa_ctl = I915_READ(DP_A);
+	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
+	     "dp pll off, should be on\n");
+	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
+
+	/* We can't rely on the value tracked for the DP register in
+	 * intel_dp->DP because link_down must not change that (otherwise link
+	 * re-training will fail). */
 	dpa_ctl &= ~DP_PLL_ENABLE;
 	I915_WRITE(DP_A, dpa_ctl);
 	POSTING_READ(DP_A);
@@ -1281,10 +1256,57 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
 	}
 }

-static void intel_dp_prepare(struct drm_encoder *encoder)
+static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
+				  enum pipe *pipe)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp = I915_READ(intel_dp->output_reg);

1267 if (!(tmp & DP_PORT_EN))
1268 return false;
1269
1270 if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
1271 *pipe = PORT_TO_PIPE_CPT(tmp);
1272 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
1273 *pipe = PORT_TO_PIPE(tmp);
1274 } else {
1275 u32 trans_sel;
1276 u32 trans_dp;
1277 int i;
1278
1279 switch (intel_dp->output_reg) {
1280 case PCH_DP_B:
1281 trans_sel = TRANS_DP_PORT_SEL_B;
1282 break;
1283 case PCH_DP_C:
1284 trans_sel = TRANS_DP_PORT_SEL_C;
1285 break;
1286 case PCH_DP_D:
1287 trans_sel = TRANS_DP_PORT_SEL_D;
1288 break;
1289 default:
1290 return true;
1291 }
1292
1293 for_each_pipe(i) {
1294 trans_dp = I915_READ(TRANS_DP_CTL(i));
1295 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
1296 *pipe = i;
1297 return true;
1298 }
1299 }
1300 }
1301
1302 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
1303
1304 return true;
1305}
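intel_dp_get_hw_state recovers which pipe drives the port either directly from pipe-select bits in the port register or, on CPT, by scanning the per-pipe transcoder registers for a matching port select. A standalone sketch of the scanning variant (all register contents and the bit layout are invented for illustration):

#include <stdio.h>
#include <stdint.h>

#define PORT_SEL_MASK	0x3
#define PORT_SEL_B	0x1

int main(void)
{
	/* pretend per-pipe transcoder DP control readouts */
	uint32_t trans_dp[3] = { 0x0, PORT_SEL_B, 0x0 };
	int pipe;

	for (pipe = 0; pipe < 3; pipe++) {
		if ((trans_dp[pipe] & PORT_SEL_MASK) == PORT_SEL_B) {
			printf("port B is driven by pipe %d\n", pipe);
			return 0;
		}
	}
	printf("no pipe found for port B\n");
	return 0;
}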
1306
+static void intel_disable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

 	/* Make sure the panel is off before trying to change the mode. But also
 	 * ensure that we have vdd while we switch off the panel. */
@@ -1292,14 +1314,31 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 	ironlake_edp_backlight_off(intel_dp);
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 	ironlake_edp_panel_off(intel_dp);
-	intel_dp_link_down(intel_dp);
+
+	/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
+	if (!is_cpu_edp(intel_dp))
+		intel_dp_link_down(intel_dp);
 }

-static void intel_dp_commit(struct drm_encoder *encoder)
+static void intel_post_disable_dp(struct intel_encoder *encoder)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+	if (is_cpu_edp(intel_dp)) {
+		intel_dp_link_down(intel_dp);
+		ironlake_edp_pll_off(intel_dp);
+	}
+}
+
+static void intel_enable_dp(struct intel_encoder *encoder)
+{
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+
+	if (WARN_ON(dp_reg & DP_PORT_EN))
+		return;

 	ironlake_edp_panel_vdd_on(intel_dp);
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -1308,47 +1347,14 @@ static void intel_dp_commit(struct drm_encoder *encoder)
1308 ironlake_edp_panel_vdd_off(intel_dp, true); 1347 ironlake_edp_panel_vdd_off(intel_dp, true);
1309 intel_dp_complete_link_train(intel_dp); 1348 intel_dp_complete_link_train(intel_dp);
1310 ironlake_edp_backlight_on(intel_dp); 1349 ironlake_edp_backlight_on(intel_dp);
1311
1312 intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
1313
1314 if (HAS_PCH_CPT(dev))
1315 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
1316} 1350}
1317 1351
1318static void 1352static void intel_pre_enable_dp(struct intel_encoder *encoder)
1319intel_dp_dpms(struct drm_encoder *encoder, int mode)
1320{ 1353{
1321 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1354 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1322 struct drm_device *dev = encoder->dev;
1323 struct drm_i915_private *dev_priv = dev->dev_private;
1324 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
1325 1355
1326 if (mode != DRM_MODE_DPMS_ON) { 1356 if (is_cpu_edp(intel_dp))
1327 /* Switching the panel off requires vdd. */ 1357 ironlake_edp_pll_on(intel_dp);
1328 ironlake_edp_panel_vdd_on(intel_dp);
1329 ironlake_edp_backlight_off(intel_dp);
1330 intel_dp_sink_dpms(intel_dp, mode);
1331 ironlake_edp_panel_off(intel_dp);
1332 intel_dp_link_down(intel_dp);
1333
1334 if (is_cpu_edp(intel_dp))
1335 ironlake_edp_pll_off(encoder);
1336 } else {
1337 if (is_cpu_edp(intel_dp))
1338 ironlake_edp_pll_on(encoder);
1339
1340 ironlake_edp_panel_vdd_on(intel_dp);
1341 intel_dp_sink_dpms(intel_dp, mode);
1342 if (!(dp_reg & DP_PORT_EN)) {
1343 intel_dp_start_link_train(intel_dp);
1344 ironlake_edp_panel_on(intel_dp);
1345 ironlake_edp_panel_vdd_off(intel_dp, true);
1346 intel_dp_complete_link_train(intel_dp);
1347 } else
1348 ironlake_edp_panel_vdd_off(intel_dp, false);
1349 ironlake_edp_backlight_on(intel_dp);
1350 }
1351 intel_dp->dpms_mode = mode;
1352} 1358}
1353 1359
1354/* 1360/*
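
The single dpms entry point is gone: panel power, link training and PLL handling are now split across the four stage hooks introduced below in intel_drv.h. A minimal sketch of the ordering these hooks are assumed to be called in by the crtc enable/disable paths; the *_sketch callers are hypothetical stand-ins, not driver functions:

    static void crtc_enable_sketch(struct intel_encoder *encoder)
    {
            if (encoder->pre_enable)
                    encoder->pre_enable(encoder);   /* CPU eDP: PLL on before the pipe */
            /* ... pipe and planes are enabled here ... */
            encoder->enable(encoder);               /* panel power, link training, backlight */
    }

    static void crtc_disable_sketch(struct intel_encoder *encoder)
    {
            encoder->disable(encoder);              /* backlight/panel off; PCH link down */
            /* ... pipe and planes are disabled here ... */
            if (encoder->post_disable)
                    encoder->post_disable(encoder); /* CPU eDP: link down, then PLL off */
    }
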
@@ -1667,6 +1673,45 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1667 struct drm_i915_private *dev_priv = dev->dev_private; 1673 struct drm_i915_private *dev_priv = dev->dev_private;
1668 int ret; 1674 int ret;
1669 1675
1676 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
1677 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
1678
1679 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1680 case DP_TRAINING_PATTERN_DISABLE:
1681 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
1682 break;
1683 case DP_TRAINING_PATTERN_1:
1684 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
1685 break;
1686 case DP_TRAINING_PATTERN_2:
1687 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
1688 break;
1689 case DP_TRAINING_PATTERN_3:
1690 DRM_ERROR("DP training pattern 3 not supported\n");
1691 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
1692 break;
1693 }
1694
1695 } else {
1696 dp_reg_value &= ~DP_LINK_TRAIN_MASK;
1697
1698 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1699 case DP_TRAINING_PATTERN_DISABLE:
1700 dp_reg_value |= DP_LINK_TRAIN_OFF;
1701 break;
1702 case DP_TRAINING_PATTERN_1:
1703 dp_reg_value |= DP_LINK_TRAIN_PAT_1;
1704 break;
1705 case DP_TRAINING_PATTERN_2:
1706 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
1707 break;
1708 case DP_TRAINING_PATTERN_3:
1709 DRM_ERROR("DP training pattern 3 not supported\n");
1710 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
1711 break;
1712 }
1713 }
1714
1670 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1715 I915_WRITE(intel_dp->output_reg, dp_reg_value);
1671 POSTING_READ(intel_dp->output_reg); 1716 POSTING_READ(intel_dp->output_reg);
1672 1717
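
With the pattern-to-register translation folded into intel_dp_set_link_train(), call sites pass the raw DP value plus the DPCD training pattern and no longer pre-bake platform-specific bits. A hedged sketch of a caller after this change, mirroring the hunks below:

    uint32_t DP = intel_dp->DP | DP_PORT_EN;

    /* The helper now picks the DP_LINK_TRAIN_*_CPT vs. GMCH bits itself. */
    if (!intel_dp_set_link_train(intel_dp, DP,
                                 DP_TRAINING_PATTERN_1 |
                                 DP_LINK_SCRAMBLING_DISABLE))
            return; /* AUX write to the sink failed */
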
@@ -1674,12 +1719,15 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1674 DP_TRAINING_PATTERN_SET, 1719 DP_TRAINING_PATTERN_SET,
1675 dp_train_pat); 1720 dp_train_pat);
1676 1721
1677 ret = intel_dp_aux_native_write(intel_dp, 1722 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
1678 DP_TRAINING_LANE0_SET, 1723 DP_TRAINING_PATTERN_DISABLE) {
1679 intel_dp->train_set, 1724 ret = intel_dp_aux_native_write(intel_dp,
1680 intel_dp->lane_count); 1725 DP_TRAINING_LANE0_SET,
1681 if (ret != intel_dp->lane_count) 1726 intel_dp->train_set,
1682 return false; 1727 intel_dp->lane_count);
1728 if (ret != intel_dp->lane_count)
1729 return false;
1730 }
1683 1731
1684 return true; 1732 return true;
1685} 1733}
@@ -1689,26 +1737,12 @@ static void
1689intel_dp_start_link_train(struct intel_dp *intel_dp) 1737intel_dp_start_link_train(struct intel_dp *intel_dp)
1690{ 1738{
1691 struct drm_device *dev = intel_dp->base.base.dev; 1739 struct drm_device *dev = intel_dp->base.base.dev;
1692 struct drm_i915_private *dev_priv = dev->dev_private;
1693 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
1694 int i; 1740 int i;
1695 uint8_t voltage; 1741 uint8_t voltage;
1696 bool clock_recovery = false; 1742 bool clock_recovery = false;
1697 int voltage_tries, loop_tries; 1743 int voltage_tries, loop_tries;
1698 u32 reg;
1699 uint32_t DP = intel_dp->DP; 1744 uint32_t DP = intel_dp->DP;
1700 1745
1701 /*
1702 * On CPT we have to enable the port in training pattern 1, which
1703 * will happen below in intel_dp_set_link_train. Otherwise, enable
1704 * the port and wait for it to become active.
1705 */
1706 if (!HAS_PCH_CPT(dev)) {
1707 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
1708 POSTING_READ(intel_dp->output_reg);
1709 intel_wait_for_vblank(dev, intel_crtc->pipe);
1710 }
1711
1712 /* Write the link configuration data */ 1746 /* Write the link configuration data */
1713 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1747 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
1714 intel_dp->link_configuration, 1748 intel_dp->link_configuration,
@@ -1716,10 +1750,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1716 1750
1717 DP |= DP_PORT_EN; 1751 DP |= DP_PORT_EN;
1718 1752
1719 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1720 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1721 else
1722 DP &= ~DP_LINK_TRAIN_MASK;
1723 memset(intel_dp->train_set, 0, 4); 1753 memset(intel_dp->train_set, 0, 4);
1724 voltage = 0xff; 1754 voltage = 0xff;
1725 voltage_tries = 0; 1755 voltage_tries = 0;
@@ -1743,12 +1773,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1743 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1773 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1744 } 1774 }
1745 1775
1746 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) 1776 if (!intel_dp_set_link_train(intel_dp, DP,
1747 reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
1748 else
1749 reg = DP | DP_LINK_TRAIN_PAT_1;
1750
1751 if (!intel_dp_set_link_train(intel_dp, reg,
1752 DP_TRAINING_PATTERN_1 | 1777 DP_TRAINING_PATTERN_1 |
1753 DP_LINK_SCRAMBLING_DISABLE)) 1778 DP_LINK_SCRAMBLING_DISABLE))
1754 break; 1779 break;
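
Condensed control-flow sketch of the clock-recovery loop after this cleanup; the voltage/loop retry counters and error paths are elided, and intel_dp_signal_levels() and clock_recovery_ok() stand in for the driver's signal-level and CR status helpers (the other calls appear in the surrounding hunks):

    memset(intel_dp->train_set, 0, 4);
    DP |= DP_PORT_EN;
    for (;;) {
            uint8_t link_status[DP_LINK_STATUS_SIZE];
            uint32_t signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);

            DP = (DP & ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK)) | signal_levels;

            if (!intel_dp_set_link_train(intel_dp, DP,
                                         DP_TRAINING_PATTERN_1 |
                                         DP_LINK_SCRAMBLING_DISABLE))
                    break;
            if (!intel_dp_get_link_status(intel_dp, link_status))
                    break;
            if (clock_recovery_ok(link_status, intel_dp->lane_count))
                    break;  /* CR done; channel equalization follows */

            intel_dp_get_adjust_train(intel_dp, link_status);
    }
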
@@ -1803,10 +1828,8 @@ static void
1803intel_dp_complete_link_train(struct intel_dp *intel_dp) 1828intel_dp_complete_link_train(struct intel_dp *intel_dp)
1804{ 1829{
1805 struct drm_device *dev = intel_dp->base.base.dev; 1830 struct drm_device *dev = intel_dp->base.base.dev;
1806 struct drm_i915_private *dev_priv = dev->dev_private;
1807 bool channel_eq = false; 1831 bool channel_eq = false;
1808 int tries, cr_tries; 1832 int tries, cr_tries;
1809 u32 reg;
1810 uint32_t DP = intel_dp->DP; 1833 uint32_t DP = intel_dp->DP;
1811 1834
1812 /* channel equalization */ 1835 /* channel equalization */
@@ -1835,13 +1858,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1835 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1858 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1836 } 1859 }
1837 1860
1838 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1839 reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
1840 else
1841 reg = DP | DP_LINK_TRAIN_PAT_2;
1842
1843 /* channel eq pattern */ 1861 /* channel eq pattern */
1844 if (!intel_dp_set_link_train(intel_dp, reg, 1862 if (!intel_dp_set_link_train(intel_dp, DP,
1845 DP_TRAINING_PATTERN_2 | 1863 DP_TRAINING_PATTERN_2 |
1846 DP_LINK_SCRAMBLING_DISABLE)) 1864 DP_LINK_SCRAMBLING_DISABLE))
1847 break; 1865 break;
@@ -1876,15 +1894,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1876 ++tries; 1894 ++tries;
1877 } 1895 }
1878 1896
1879 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) 1897 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
1880 reg = DP | DP_LINK_TRAIN_OFF_CPT;
1881 else
1882 reg = DP | DP_LINK_TRAIN_OFF;
1883
1884 I915_WRITE(intel_dp->output_reg, reg);
1885 POSTING_READ(intel_dp->output_reg);
1886 intel_dp_aux_native_write_1(intel_dp,
1887 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
1888} 1898}
1889 1899
1890static void 1900static void
@@ -1894,18 +1904,11 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1894 struct drm_i915_private *dev_priv = dev->dev_private; 1904 struct drm_i915_private *dev_priv = dev->dev_private;
1895 uint32_t DP = intel_dp->DP; 1905 uint32_t DP = intel_dp->DP;
1896 1906
1897 if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) 1907 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1898 return; 1908 return;
1899 1909
1900 DRM_DEBUG_KMS("\n"); 1910 DRM_DEBUG_KMS("\n");
1901 1911
1902 if (is_edp(intel_dp)) {
1903 DP &= ~DP_PLL_ENABLE;
1904 I915_WRITE(intel_dp->output_reg, DP);
1905 POSTING_READ(intel_dp->output_reg);
1906 udelay(100);
1907 }
1908
1909 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 1912 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
1910 DP &= ~DP_LINK_TRAIN_MASK_CPT; 1913 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1911 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 1914 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
@@ -1917,13 +1920,6 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1917 1920
1918 msleep(17); 1921 msleep(17);
1919 1922
1920 if (is_edp(intel_dp)) {
1921 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
1922 DP |= DP_LINK_TRAIN_OFF_CPT;
1923 else
1924 DP |= DP_LINK_TRAIN_OFF;
1925 }
1926
1927 if (HAS_PCH_IBX(dev) && 1923 if (HAS_PCH_IBX(dev) &&
1928 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 1924 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
1929 struct drm_crtc *crtc = intel_dp->base.base.crtc; 1925 struct drm_crtc *crtc = intel_dp->base.base.crtc;
@@ -2032,10 +2028,10 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
2032 u8 sink_irq_vector; 2028 u8 sink_irq_vector;
2033 u8 link_status[DP_LINK_STATUS_SIZE]; 2029 u8 link_status[DP_LINK_STATUS_SIZE];
2034 2030
2035 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) 2031 if (!intel_dp->base.connectors_active)
2036 return; 2032 return;
2037 2033
2038 if (!intel_dp->base.base.crtc) 2034 if (WARN_ON(!intel_dp->base.base.crtc))
2039 return; 2035 return;
2040 2036
2041 /* Try to read receiver status if the link appears to be up */ 2037 /* Try to read receiver status if the link appears to be up */
@@ -2159,7 +2155,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
2159 ret = drm_add_edid_modes(connector, intel_dp->edid); 2155 ret = drm_add_edid_modes(connector, intel_dp->edid);
2160 drm_edid_to_eld(connector, 2156 drm_edid_to_eld(connector,
2161 intel_dp->edid); 2157 intel_dp->edid);
2162 connector->display_info.raw_edid = NULL;
2163 return intel_dp->edid_mode_count; 2158 return intel_dp->edid_mode_count;
2164 } 2159 }
2165 2160
@@ -2205,7 +2200,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
2205 edid = intel_dp_get_edid(connector, &intel_dp->adapter); 2200 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
2206 if (edid) { 2201 if (edid) {
2207 intel_dp->has_audio = drm_detect_monitor_audio(edid); 2202 intel_dp->has_audio = drm_detect_monitor_audio(edid);
2208 connector->display_info.raw_edid = NULL;
2209 kfree(edid); 2203 kfree(edid);
2210 } 2204 }
2211 } 2205 }
@@ -2270,8 +2264,6 @@ intel_dp_detect_audio(struct drm_connector *connector)
2270 edid = intel_dp_get_edid(connector, &intel_dp->adapter); 2264 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
2271 if (edid) { 2265 if (edid) {
2272 has_audio = drm_detect_monitor_audio(edid); 2266 has_audio = drm_detect_monitor_audio(edid);
2273
2274 connector->display_info.raw_edid = NULL;
2275 kfree(edid); 2267 kfree(edid);
2276 } 2268 }
2277 2269
@@ -2325,9 +2317,8 @@ intel_dp_set_property(struct drm_connector *connector,
2325done: 2317done:
2326 if (intel_dp->base.base.crtc) { 2318 if (intel_dp->base.base.crtc) {
2327 struct drm_crtc *crtc = intel_dp->base.base.crtc; 2319 struct drm_crtc *crtc = intel_dp->base.base.crtc;
2328 drm_crtc_helper_set_mode(crtc, &crtc->mode, 2320 intel_set_mode(crtc, &crtc->mode,
2329 crtc->x, crtc->y, 2321 crtc->x, crtc->y, crtc->fb);
2330 crtc->fb);
2331 } 2322 }
2332 2323
2333 return 0; 2324 return 0;
@@ -2361,15 +2352,13 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2361} 2352}
2362 2353
2363static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 2354static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
2364 .dpms = intel_dp_dpms,
2365 .mode_fixup = intel_dp_mode_fixup, 2355 .mode_fixup = intel_dp_mode_fixup,
2366 .prepare = intel_dp_prepare,
2367 .mode_set = intel_dp_mode_set, 2356 .mode_set = intel_dp_mode_set,
2368 .commit = intel_dp_commit, 2357 .disable = intel_encoder_noop,
2369}; 2358};
2370 2359
2371static const struct drm_connector_funcs intel_dp_connector_funcs = { 2360static const struct drm_connector_funcs intel_dp_connector_funcs = {
2372 .dpms = drm_helper_connector_dpms, 2361 .dpms = intel_connector_dpms,
2373 .detect = intel_dp_detect, 2362 .detect = intel_dp_detect,
2374 .fill_modes = drm_helper_probe_single_connector_modes, 2363 .fill_modes = drm_helper_probe_single_connector_modes,
2375 .set_property = intel_dp_set_property, 2364 .set_property = intel_dp_set_property,
@@ -2440,7 +2429,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
2440} 2429}
2441 2430
2442void 2431void
2443intel_dp_init(struct drm_device *dev, int output_reg) 2432intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
2444{ 2433{
2445 struct drm_i915_private *dev_priv = dev->dev_private; 2434 struct drm_i915_private *dev_priv = dev->dev_private;
2446 struct drm_connector *connector; 2435 struct drm_connector *connector;
@@ -2455,7 +2444,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2455 return; 2444 return;
2456 2445
2457 intel_dp->output_reg = output_reg; 2446 intel_dp->output_reg = output_reg;
2458 intel_dp->dpms_mode = -1; 2447 intel_dp->port = port;
2448 /* Preserve the current hw state. */
2449 intel_dp->DP = I915_READ(intel_dp->output_reg);
2459 2450
2460 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 2451 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
2461 if (!intel_connector) { 2452 if (!intel_connector) {
@@ -2482,18 +2473,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2482 2473
2483 connector->polled = DRM_CONNECTOR_POLL_HPD; 2474 connector->polled = DRM_CONNECTOR_POLL_HPD;
2484 2475
2485 if (output_reg == DP_B || output_reg == PCH_DP_B) 2476 intel_encoder->cloneable = false;
2486 intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
2487 else if (output_reg == DP_C || output_reg == PCH_DP_C)
2488 intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
2489 else if (output_reg == DP_D || output_reg == PCH_DP_D)
2490 intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
2491 2477
2492 if (is_edp(intel_dp)) { 2478 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
2493 intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); 2479 ironlake_panel_vdd_work);
2494 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
2495 ironlake_panel_vdd_work);
2496 }
2497 2480
2498 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 2481 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
2499 2482
@@ -2507,29 +2490,33 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2507 intel_connector_attach_encoder(intel_connector, intel_encoder); 2490 intel_connector_attach_encoder(intel_connector, intel_encoder);
2508 drm_sysfs_connector_add(connector); 2491 drm_sysfs_connector_add(connector);
2509 2492
2493 intel_encoder->enable = intel_enable_dp;
2494 intel_encoder->pre_enable = intel_pre_enable_dp;
2495 intel_encoder->disable = intel_disable_dp;
2496 intel_encoder->post_disable = intel_post_disable_dp;
2497 intel_encoder->get_hw_state = intel_dp_get_hw_state;
2498 intel_connector->get_hw_state = intel_connector_get_hw_state;
2499
2510 /* Set up the DDC bus. */ 2500 /* Set up the DDC bus. */
2511 switch (output_reg) { 2501 switch (port) {
2512 case DP_A: 2502 case PORT_A:
2513 name = "DPDDC-A"; 2503 name = "DPDDC-A";
2514 break; 2504 break;
2515 case DP_B: 2505 case PORT_B:
2516 case PCH_DP_B: 2506 dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
2517 dev_priv->hotplug_supported_mask |= 2507 name = "DPDDC-B";
2518 DPB_HOTPLUG_INT_STATUS; 2508 break;
2519 name = "DPDDC-B"; 2509 case PORT_C:
2520 break; 2510 dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
2521 case DP_C: 2511 name = "DPDDC-C";
2522 case PCH_DP_C: 2512 break;
2523 dev_priv->hotplug_supported_mask |= 2513 case PORT_D:
2524 DPC_HOTPLUG_INT_STATUS; 2514 dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
2525 name = "DPDDC-C"; 2515 name = "DPDDC-D";
2526 break; 2516 break;
2527 case DP_D: 2517 default:
2528 case PCH_DP_D: 2518 WARN(1, "Invalid port %c\n", port_name(port));
2529 dev_priv->hotplug_supported_mask |= 2519 break;
2530 DPD_HOTPLUG_INT_STATUS;
2531 name = "DPDDC-D";
2532 break;
2533 } 2520 }
2534 2521
2535 /* Cache some DPCD data in the eDP case */ 2522 /* Cache some DPCD data in the eDP case */
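
Call sites must now name the port explicitly instead of having it inferred from output_reg. A hypothetical setup-code excerpt under that assumption:

    /* Hypothetical call site; PCH_DP_B and DP_DETECTED as in i915_reg.h. */
    if (I915_READ(PCH_DP_B) & DP_DETECTED)
            intel_dp_init(dev, PCH_DP_B, PORT_B);
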
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 7db849052a98..05cc7c372fc5 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -31,6 +31,7 @@
31#include <drm/drm_crtc.h> 31#include <drm/drm_crtc.h>
32#include <drm/drm_crtc_helper.h> 32#include <drm/drm_crtc_helper.h>
33#include <drm/drm_fb_helper.h> 33#include <drm/drm_fb_helper.h>
34#include <drm/drm_dp_helper.h>
34 35
35#define _wait_for(COND, MS, W) ({ \ 36#define _wait_for(COND, MS, W) ({ \
36 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ 37 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
@@ -40,7 +41,11 @@
40 ret__ = -ETIMEDOUT; \ 41 ret__ = -ETIMEDOUT; \
41 break; \ 42 break; \
42 } \ 43 } \
43 if (W && drm_can_sleep()) msleep(W); \ 44 if (W && drm_can_sleep()) { \
45 msleep(W); \
46 } else { \
47 cpu_relax(); \
48 } \
44 } \ 49 } \
45 ret__; \ 50 ret__; \
46}) 51})
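
The macro now spins with cpu_relax() when sleeping is not allowed instead of busy-looping bare. An example poll built on it, assuming the header's wait_for(COND, MS) wrapper that passes W=1; STATUS_REG and STATUS_READY are hypothetical names for illustration:

    if (wait_for((I915_READ(STATUS_REG) & STATUS_READY) != 0, 50))
            DRM_ERROR("status bit did not assert within 50 ms\n");
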
@@ -90,25 +95,6 @@
90#define INTEL_OUTPUT_DISPLAYPORT 7 95#define INTEL_OUTPUT_DISPLAYPORT 7
91#define INTEL_OUTPUT_EDP 8 96#define INTEL_OUTPUT_EDP 8
92 97
93/* Intel Pipe Clone Bit */
94#define INTEL_HDMIB_CLONE_BIT 1
95#define INTEL_HDMIC_CLONE_BIT 2
96#define INTEL_HDMID_CLONE_BIT 3
97#define INTEL_HDMIE_CLONE_BIT 4
98#define INTEL_HDMIF_CLONE_BIT 5
99#define INTEL_SDVO_NON_TV_CLONE_BIT 6
100#define INTEL_SDVO_TV_CLONE_BIT 7
101#define INTEL_SDVO_LVDS_CLONE_BIT 8
102#define INTEL_ANALOG_CLONE_BIT 9
103#define INTEL_TV_CLONE_BIT 10
104#define INTEL_DP_B_CLONE_BIT 11
105#define INTEL_DP_C_CLONE_BIT 12
106#define INTEL_DP_D_CLONE_BIT 13
107#define INTEL_LVDS_CLONE_BIT 14
108#define INTEL_DVO_TMDS_CLONE_BIT 15
109#define INTEL_DVO_LVDS_CLONE_BIT 16
110#define INTEL_EDP_CLONE_BIT 17
111
112#define INTEL_DVO_CHIP_NONE 0 98#define INTEL_DVO_CHIP_NONE 0
113#define INTEL_DVO_CHIP_LVDS 1 99#define INTEL_DVO_CHIP_LVDS 1
114#define INTEL_DVO_CHIP_TMDS 2 100#define INTEL_DVO_CHIP_TMDS 2
@@ -151,16 +137,48 @@ struct intel_fbdev {
151 137
152struct intel_encoder { 138struct intel_encoder {
153 struct drm_encoder base; 139 struct drm_encoder base;
140 /*
141 * The new crtc this encoder will be driven from. Only differs from
142 * base->crtc while a modeset is in progress.
143 */
144 struct intel_crtc *new_crtc;
145
154 int type; 146 int type;
155 bool needs_tv_clock; 147 bool needs_tv_clock;
148 /*
 149 * Intel hw has only one MUX where encoders could be cloned, hence a
150 * simple flag is enough to compute the possible_clones mask.
151 */
152 bool cloneable;
153 bool connectors_active;
156 void (*hot_plug)(struct intel_encoder *); 154 void (*hot_plug)(struct intel_encoder *);
155 void (*pre_enable)(struct intel_encoder *);
156 void (*enable)(struct intel_encoder *);
157 void (*disable)(struct intel_encoder *);
158 void (*post_disable)(struct intel_encoder *);
 159 /* Read out the current hw state of this encoder, returning true if
 160 * the encoder is active. If the encoder is enabled it also sets the pipe
 161 * it is connected to in the pipe parameter. */
162 bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
157 int crtc_mask; 163 int crtc_mask;
158 int clone_mask;
159}; 164};
160 165
161struct intel_connector { 166struct intel_connector {
162 struct drm_connector base; 167 struct drm_connector base;
168 /*
169 * The fixed encoder this connector is connected to.
170 */
163 struct intel_encoder *encoder; 171 struct intel_encoder *encoder;
172
173 /*
 174 * The new encoder this connector will be driven by. Only differs from
175 * encoder while a modeset is in progress.
176 */
177 struct intel_encoder *new_encoder;
178
 179 /* Reads out the current hw state, returning true if the connector is enabled
180 * and active (i.e. dpms ON state). */
181 bool (*get_hw_state)(struct intel_connector *);
164}; 182};
165 183
166struct intel_crtc { 184struct intel_crtc {
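
The get_hw_state() hooks exist so software state can be cross-checked against the hardware. A rough sketch of the kind of assertion intel_modeset_check_state() (declared further down in this header) is assumed to make; the real checks are more thorough:

    static void check_encoder_state_sketch(struct intel_encoder *encoder)
    {
            enum pipe pipe;
            bool enabled = encoder->get_hw_state(encoder, &pipe);

            WARN(enabled != encoder->connectors_active,
                 "encoder hw/sw state mismatch\n");

            if (enabled && encoder->base.crtc)
                    WARN(pipe != to_intel_crtc(encoder->base.crtc)->pipe,
                         "encoder active on the wrong pipe\n");
    }
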
@@ -168,11 +186,13 @@ struct intel_crtc {
168 enum pipe pipe; 186 enum pipe pipe;
169 enum plane plane; 187 enum plane plane;
170 u8 lut_r[256], lut_g[256], lut_b[256]; 188 u8 lut_r[256], lut_g[256], lut_b[256];
171 int dpms_mode; 189 /*
172 bool active; /* is the crtc on? independent of the dpms mode */ 190 * Whether the crtc and the connected output pipeline is active. Implies
191 * that crtc->enabled is set, i.e. the current mode configuration has
192 * some outputs connected to this crtc.
193 */
194 bool active;
173 bool primary_disabled; /* is the crtc obscured by a plane? */ 195 bool primary_disabled; /* is the crtc obscured by a plane? */
174 bool busy; /* is scanout buffer being updated frequently? */
175 struct timer_list idle_timer;
176 bool lowfreq_avail; 196 bool lowfreq_avail;
177 struct intel_overlay *overlay; 197 struct intel_overlay *overlay;
178 struct intel_unpin_work *unpin_work; 198 struct intel_unpin_work *unpin_work;
@@ -311,6 +331,37 @@ struct intel_hdmi {
311 struct drm_display_mode *adjusted_mode); 331 struct drm_display_mode *adjusted_mode);
312}; 332};
313 333
334#define DP_RECEIVER_CAP_SIZE 0xf
335#define DP_LINK_CONFIGURATION_SIZE 9
336
337struct intel_dp {
338 struct intel_encoder base;
339 uint32_t output_reg;
340 uint32_t DP;
341 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
342 bool has_audio;
343 enum hdmi_force_audio force_audio;
344 enum port port;
345 uint32_t color_range;
346 uint8_t link_bw;
347 uint8_t lane_count;
348 uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
349 struct i2c_adapter adapter;
350 struct i2c_algo_dp_aux_data algo;
351 bool is_pch_edp;
352 uint8_t train_set[4];
353 int panel_power_up_delay;
354 int panel_power_down_delay;
355 int panel_power_cycle_delay;
356 int backlight_on_delay;
357 int backlight_off_delay;
358 struct drm_display_mode *panel_fixed_mode; /* for eDP */
359 struct delayed_work panel_vdd_work;
360 bool want_panel_vdd;
361 struct edid *edid; /* cached EDID for eDP */
362 int edid_mode_count;
363};
364
314static inline struct drm_crtc * 365static inline struct drm_crtc *
315intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) 366intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
316{ 367{
@@ -350,17 +401,21 @@ extern void intel_attach_force_audio_property(struct drm_connector *connector);
350extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); 401extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
351 402
352extern void intel_crt_init(struct drm_device *dev); 403extern void intel_crt_init(struct drm_device *dev);
353extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); 404extern void intel_hdmi_init(struct drm_device *dev,
405 int sdvox_reg, enum port port);
354extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); 406extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
355extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); 407extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
356extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, 408extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
357 bool is_sdvob); 409 bool is_sdvob);
358extern void intel_dvo_init(struct drm_device *dev); 410extern void intel_dvo_init(struct drm_device *dev);
359extern void intel_tv_init(struct drm_device *dev); 411extern void intel_tv_init(struct drm_device *dev);
360extern void intel_mark_busy(struct drm_device *dev, 412extern void intel_mark_busy(struct drm_device *dev);
361 struct drm_i915_gem_object *obj); 413extern void intel_mark_idle(struct drm_device *dev);
414extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
415extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
362extern bool intel_lvds_init(struct drm_device *dev); 416extern bool intel_lvds_init(struct drm_device *dev);
363extern void intel_dp_init(struct drm_device *dev, int dp_reg); 417extern void intel_dp_init(struct drm_device *dev, int output_reg,
418 enum port port);
364void 419void
365intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 420intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
366 struct drm_display_mode *adjusted_mode); 421 struct drm_display_mode *adjusted_mode);
@@ -373,8 +428,6 @@ extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
373extern void intel_flush_display_plane(struct drm_i915_private *dev_priv, 428extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
374 enum plane plane); 429 enum plane plane);
375 430
376void intel_sanitize_pm(struct drm_device *dev);
377
378/* intel_panel.c */ 431/* intel_panel.c */
379extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 432extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
380 struct drm_display_mode *adjusted_mode); 433 struct drm_display_mode *adjusted_mode);
@@ -391,10 +444,27 @@ extern void intel_panel_disable_backlight(struct drm_device *dev);
391extern void intel_panel_destroy_backlight(struct drm_device *dev); 444extern void intel_panel_destroy_backlight(struct drm_device *dev);
392extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); 445extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
393 446
447struct intel_set_config {
448 struct drm_encoder **save_connector_encoders;
449 struct drm_crtc **save_encoder_crtcs;
450
451 bool fb_changed;
452 bool mode_changed;
453};
454
455extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
456 int x, int y, struct drm_framebuffer *old_fb);
457extern void intel_modeset_disable(struct drm_device *dev);
394extern void intel_crtc_load_lut(struct drm_crtc *crtc); 458extern void intel_crtc_load_lut(struct drm_crtc *crtc);
395extern void intel_encoder_prepare(struct drm_encoder *encoder); 459extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
396extern void intel_encoder_commit(struct drm_encoder *encoder); 460extern void intel_encoder_noop(struct drm_encoder *encoder);
397extern void intel_encoder_destroy(struct drm_encoder *encoder); 461extern void intel_encoder_destroy(struct drm_encoder *encoder);
462extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
463extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
464extern void intel_connector_dpms(struct drm_connector *, int mode);
465extern bool intel_connector_get_hw_state(struct intel_connector *connector);
466extern void intel_modeset_check_state(struct drm_device *dev);
467
398 468
399static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) 469static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
400{ 470{
@@ -417,12 +487,10 @@ struct intel_load_detect_pipe {
417 bool load_detect_temp; 487 bool load_detect_temp;
418 int dpms_mode; 488 int dpms_mode;
419}; 489};
420extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, 490extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
421 struct drm_connector *connector,
422 struct drm_display_mode *mode, 491 struct drm_display_mode *mode,
423 struct intel_load_detect_pipe *old); 492 struct intel_load_detect_pipe *old);
424extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, 493extern void intel_release_load_detect_pipe(struct drm_connector *connector,
425 struct drm_connector *connector,
426 struct intel_load_detect_pipe *old); 494 struct intel_load_detect_pipe *old);
427 495
428extern void intelfb_restore(void); 496extern void intelfb_restore(void);
@@ -503,7 +571,10 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
503extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); 571extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
504extern void ironlake_teardown_rc6(struct drm_device *dev); 572extern void ironlake_teardown_rc6(struct drm_device *dev);
505 573
506extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode); 574extern void intel_enable_ddi(struct intel_encoder *encoder);
575extern void intel_disable_ddi(struct intel_encoder *encoder);
576extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
577 enum pipe *pipe);
507extern void intel_ddi_mode_set(struct drm_encoder *encoder, 578extern void intel_ddi_mode_set(struct drm_encoder *encoder,
508 struct drm_display_mode *mode, 579 struct drm_display_mode *mode,
509 struct drm_display_mode *adjusted_mode); 580 struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ac9f2dd5648a..15da99533e5b 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -36,6 +36,7 @@
36#define SIL164_ADDR 0x38 36#define SIL164_ADDR 0x38
37#define CH7xxx_ADDR 0x76 37#define CH7xxx_ADDR 0x76
38#define TFP410_ADDR 0x38 38#define TFP410_ADDR 0x38
39#define NS2501_ADDR 0x38
39 40
40static const struct intel_dvo_device intel_dvo_devices[] = { 41static const struct intel_dvo_device intel_dvo_devices[] = {
41 { 42 {
@@ -73,7 +74,14 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
73 .slave_addr = 0x75, 74 .slave_addr = 0x75,
74 .gpio = GMBUS_PORT_DPB, 75 .gpio = GMBUS_PORT_DPB,
75 .dev_ops = &ch7017_ops, 76 .dev_ops = &ch7017_ops,
76 } 77 },
78 {
79 .type = INTEL_DVO_CHIP_TMDS,
80 .name = "ns2501",
81 .dvo_reg = DVOC,
82 .slave_addr = NS2501_ADDR,
83 .dev_ops = &ns2501_ops,
84 }
77}; 85};
78 86
79struct intel_dvo { 87struct intel_dvo {
@@ -96,22 +104,91 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
96 struct intel_dvo, base); 104 struct intel_dvo, base);
97} 105}
98 106
99static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) 107static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
100{ 108{
101 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 109 struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
102 struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); 110
111 return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
112}
113
114static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
115 enum pipe *pipe)
116{
117 struct drm_device *dev = encoder->base.dev;
118 struct drm_i915_private *dev_priv = dev->dev_private;
119 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
120 u32 tmp;
121
122 tmp = I915_READ(intel_dvo->dev.dvo_reg);
123
124 if (!(tmp & DVO_ENABLE))
125 return false;
126
127 *pipe = PORT_TO_PIPE(tmp);
128
129 return true;
130}
131
132static void intel_disable_dvo(struct intel_encoder *encoder)
133{
134 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
135 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
136 u32 dvo_reg = intel_dvo->dev.dvo_reg;
137 u32 temp = I915_READ(dvo_reg);
138
139 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
140 I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
141 I915_READ(dvo_reg);
142}
143
144static void intel_enable_dvo(struct intel_encoder *encoder)
145{
146 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
147 struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
103 u32 dvo_reg = intel_dvo->dev.dvo_reg; 148 u32 dvo_reg = intel_dvo->dev.dvo_reg;
104 u32 temp = I915_READ(dvo_reg); 149 u32 temp = I915_READ(dvo_reg);
105 150
151 I915_WRITE(dvo_reg, temp | DVO_ENABLE);
152 I915_READ(dvo_reg);
153 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
154}
155
156static void intel_dvo_dpms(struct drm_connector *connector, int mode)
157{
158 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
159 struct drm_crtc *crtc;
160
161 /* dvo supports only 2 dpms states. */
162 if (mode != DRM_MODE_DPMS_ON)
163 mode = DRM_MODE_DPMS_OFF;
164
165 if (mode == connector->dpms)
166 return;
167
168 connector->dpms = mode;
169
170 /* Only need to change hw state when actually enabled */
171 crtc = intel_dvo->base.base.crtc;
172 if (!crtc) {
173 intel_dvo->base.connectors_active = false;
174 return;
175 }
176
106 if (mode == DRM_MODE_DPMS_ON) { 177 if (mode == DRM_MODE_DPMS_ON) {
107 I915_WRITE(dvo_reg, temp | DVO_ENABLE); 178 intel_dvo->base.connectors_active = true;
108 I915_READ(dvo_reg); 179
109 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode); 180 intel_crtc_update_dpms(crtc);
181
182 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
110 } else { 183 } else {
111 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode); 184 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
112 I915_WRITE(dvo_reg, temp & ~DVO_ENABLE); 185
113 I915_READ(dvo_reg); 186 intel_dvo->base.connectors_active = false;
187
188 intel_crtc_update_dpms(crtc);
114 } 189 }
190
191 intel_modeset_check_state(connector->dev);
115} 192}
116 193
117static int intel_dvo_mode_valid(struct drm_connector *connector, 194static int intel_dvo_mode_valid(struct drm_connector *connector,
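
intel_dvo_dpms above is the DVO-specific variant of the new connector-level dpms; it differs only in also kicking the chip-specific dev_ops->dpms hook. A condensed sketch of the shared flow that intel_connector_dpms (wired up by the other connectors in this patch) is assumed to implement, using helpers declared in the intel_drv.h hunks above:

    void intel_connector_dpms_sketch(struct drm_connector *connector, int mode)
    {
            struct intel_encoder *encoder = intel_attached_encoder(connector);

            /* Only on and off are meaningful for these outputs. */
            if (mode != DRM_MODE_DPMS_ON)
                    mode = DRM_MODE_DPMS_OFF;
            if (mode == connector->dpms)
                    return;
            connector->dpms = mode;

            encoder->connectors_active = (mode == DRM_MODE_DPMS_ON);
            if (encoder->base.crtc)
                    intel_crtc_update_dpms(encoder->base.crtc);

            intel_modeset_check_state(connector->dev);
    }
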
@@ -266,15 +343,13 @@ static void intel_dvo_destroy(struct drm_connector *connector)
266} 343}
267 344
268static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { 345static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
269 .dpms = intel_dvo_dpms,
270 .mode_fixup = intel_dvo_mode_fixup, 346 .mode_fixup = intel_dvo_mode_fixup,
271 .prepare = intel_encoder_prepare,
272 .mode_set = intel_dvo_mode_set, 347 .mode_set = intel_dvo_mode_set,
273 .commit = intel_encoder_commit, 348 .disable = intel_encoder_noop,
274}; 349};
275 350
276static const struct drm_connector_funcs intel_dvo_connector_funcs = { 351static const struct drm_connector_funcs intel_dvo_connector_funcs = {
277 .dpms = drm_helper_connector_dpms, 352 .dpms = intel_dvo_dpms,
278 .detect = intel_dvo_detect, 353 .detect = intel_dvo_detect,
279 .destroy = intel_dvo_destroy, 354 .destroy = intel_dvo_destroy,
280 .fill_modes = drm_helper_probe_single_connector_modes, 355 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -363,6 +438,11 @@ void intel_dvo_init(struct drm_device *dev)
363 drm_encoder_init(dev, &intel_encoder->base, 438 drm_encoder_init(dev, &intel_encoder->base,
364 &intel_dvo_enc_funcs, encoder_type); 439 &intel_dvo_enc_funcs, encoder_type);
365 440
441 intel_encoder->disable = intel_disable_dvo;
442 intel_encoder->enable = intel_enable_dvo;
443 intel_encoder->get_hw_state = intel_dvo_get_hw_state;
444 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
445
366 /* Now, try to find a controller */ 446 /* Now, try to find a controller */
367 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { 447 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
368 struct drm_connector *connector = &intel_connector->base; 448 struct drm_connector *connector = &intel_connector->base;
@@ -395,17 +475,14 @@ void intel_dvo_init(struct drm_device *dev)
395 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 475 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
396 switch (dvo->type) { 476 switch (dvo->type) {
397 case INTEL_DVO_CHIP_TMDS: 477 case INTEL_DVO_CHIP_TMDS:
398 intel_encoder->clone_mask = 478 intel_encoder->cloneable = true;
399 (1 << INTEL_DVO_TMDS_CLONE_BIT) |
400 (1 << INTEL_ANALOG_CLONE_BIT);
401 drm_connector_init(dev, connector, 479 drm_connector_init(dev, connector,
402 &intel_dvo_connector_funcs, 480 &intel_dvo_connector_funcs,
403 DRM_MODE_CONNECTOR_DVII); 481 DRM_MODE_CONNECTOR_DVII);
404 encoder_type = DRM_MODE_ENCODER_TMDS; 482 encoder_type = DRM_MODE_ENCODER_TMDS;
405 break; 483 break;
406 case INTEL_DVO_CHIP_LVDS: 484 case INTEL_DVO_CHIP_LVDS:
407 intel_encoder->clone_mask = 485 intel_encoder->cloneable = false;
408 (1 << INTEL_DVO_LVDS_CLONE_BIT);
409 drm_connector_init(dev, connector, 486 drm_connector_init(dev, connector,
410 &intel_dvo_connector_funcs, 487 &intel_dvo_connector_funcs,
411 DRM_MODE_CONNECTOR_LVDS); 488 DRM_MODE_CONNECTOR_LVDS);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 025be7dd2a27..9ba0aaed7ee8 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -150,6 +150,9 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
150 I915_WRITE(VIDEO_DIP_DATA, *data); 150 I915_WRITE(VIDEO_DIP_DATA, *data);
151 data++; 151 data++;
152 } 152 }
153 /* Write every possible data byte to force correct ECC calculation. */
154 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
155 I915_WRITE(VIDEO_DIP_DATA, 0);
153 mmiowb(); 156 mmiowb();
154 157
155 val |= g4x_infoframe_enable(frame); 158 val |= g4x_infoframe_enable(frame);
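
The same padding is added to every *_write_infoframe() variant below. Restated as a self-contained pattern: 'len' is the valid payload length in bytes, rounded up to dwords by the first loop, and the DIP RAM is VIDEO_DIP_DATA_SIZE bytes over which the hardware computes the ECC, so leftover stale bytes must be zeroed:

    for (i = 0; i < len; i += 4) {
            I915_WRITE(VIDEO_DIP_DATA, *data);
            data++;
    }
    /* Zero the rest of the DIP RAM so the ECC covers deterministic data. */
    for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
            I915_WRITE(VIDEO_DIP_DATA, 0);
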
@@ -185,6 +188,9 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
185 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); 188 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
186 data++; 189 data++;
187 } 190 }
191 /* Write every possible data byte to force correct ECC calculation. */
192 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
193 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
188 mmiowb(); 194 mmiowb();
189 195
190 val |= g4x_infoframe_enable(frame); 196 val |= g4x_infoframe_enable(frame);
@@ -223,6 +229,9 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
223 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); 229 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
224 data++; 230 data++;
225 } 231 }
232 /* Write every possible data byte to force correct ECC calculation. */
233 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
234 I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
226 mmiowb(); 235 mmiowb();
227 236
228 val |= g4x_infoframe_enable(frame); 237 val |= g4x_infoframe_enable(frame);
@@ -258,6 +267,9 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
258 I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data); 267 I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
259 data++; 268 data++;
260 } 269 }
270 /* Write every possible data byte to force correct ECC calculation. */
271 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
272 I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
261 mmiowb(); 273 mmiowb();
262 274
263 val |= g4x_infoframe_enable(frame); 275 val |= g4x_infoframe_enable(frame);
@@ -291,6 +303,9 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
291 I915_WRITE(data_reg + i, *data); 303 I915_WRITE(data_reg + i, *data);
292 data++; 304 data++;
293 } 305 }
306 /* Write every possible data byte to force correct ECC calculation. */
307 for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
308 I915_WRITE(data_reg + i, 0);
294 mmiowb(); 309 mmiowb();
295 310
296 val |= hsw_infoframe_enable(frame); 311 val |= hsw_infoframe_enable(frame);
@@ -376,6 +391,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
376 port = VIDEO_DIP_PORT_C; 391 port = VIDEO_DIP_PORT_C;
377 break; 392 break;
378 default: 393 default:
394 BUG();
379 return; 395 return;
380 } 396 }
381 397
@@ -434,6 +450,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
434 port = VIDEO_DIP_PORT_D; 450 port = VIDEO_DIP_PORT_D;
435 break; 451 break;
436 default: 452 default:
453 BUG();
437 return; 454 return;
438 } 455 }
439 456
@@ -600,15 +617,36 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
600 intel_hdmi->set_infoframes(encoder, adjusted_mode); 617 intel_hdmi->set_infoframes(encoder, adjusted_mode);
601} 618}
602 619
603static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) 620static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
621 enum pipe *pipe)
604{ 622{
605 struct drm_device *dev = encoder->dev; 623 struct drm_device *dev = encoder->base.dev;
606 struct drm_i915_private *dev_priv = dev->dev_private; 624 struct drm_i915_private *dev_priv = dev->dev_private;
607 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 625 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
626 u32 tmp;
627
628 tmp = I915_READ(intel_hdmi->sdvox_reg);
629
630 if (!(tmp & SDVO_ENABLE))
631 return false;
632
633 if (HAS_PCH_CPT(dev))
634 *pipe = PORT_TO_PIPE_CPT(tmp);
635 else
636 *pipe = PORT_TO_PIPE(tmp);
637
638 return true;
639}
640
641static void intel_enable_hdmi(struct intel_encoder *encoder)
642{
643 struct drm_device *dev = encoder->base.dev;
644 struct drm_i915_private *dev_priv = dev->dev_private;
645 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
608 u32 temp; 646 u32 temp;
609 u32 enable_bits = SDVO_ENABLE; 647 u32 enable_bits = SDVO_ENABLE;
610 648
611 if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON) 649 if (intel_hdmi->has_audio)
612 enable_bits |= SDVO_AUDIO_ENABLE; 650 enable_bits |= SDVO_AUDIO_ENABLE;
613 651
614 temp = I915_READ(intel_hdmi->sdvox_reg); 652 temp = I915_READ(intel_hdmi->sdvox_reg);
@@ -616,31 +654,12 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
616 /* HW workaround for IBX, we need to move the port to transcoder A 654 /* HW workaround for IBX, we need to move the port to transcoder A
617 * before disabling it. */ 655 * before disabling it. */
618 if (HAS_PCH_IBX(dev)) { 656 if (HAS_PCH_IBX(dev)) {
619 struct drm_crtc *crtc = encoder->crtc; 657 struct drm_crtc *crtc = encoder->base.crtc;
620 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; 658 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
621 659
622 if (mode != DRM_MODE_DPMS_ON) { 660 /* Restore the transcoder select bit. */
623 if (temp & SDVO_PIPE_B_SELECT) { 661 if (pipe == PIPE_B)
624 temp &= ~SDVO_PIPE_B_SELECT; 662 enable_bits |= SDVO_PIPE_B_SELECT;
625 I915_WRITE(intel_hdmi->sdvox_reg, temp);
626 POSTING_READ(intel_hdmi->sdvox_reg);
627
628 /* Again we need to write this twice. */
629 I915_WRITE(intel_hdmi->sdvox_reg, temp);
630 POSTING_READ(intel_hdmi->sdvox_reg);
631
632 /* Transcoder selection bits only update
633 * effectively on vblank. */
634 if (crtc)
635 intel_wait_for_vblank(dev, pipe);
636 else
637 msleep(50);
638 }
639 } else {
640 /* Restore the transcoder select bit. */
641 if (pipe == PIPE_B)
642 enable_bits |= SDVO_PIPE_B_SELECT;
643 }
644 } 663 }
645 664
646 /* HW workaround, need to toggle enable bit off and on for 12bpc, but 665 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
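
intel_hdmi_get_hw_state() above follows the same shape as the LVDS and DVO variants elsewhere in this patch. Factored into one hedged sketch, using the PORT_TO_PIPE/PORT_TO_PIPE_CPT decoding seen above:

    static bool port_get_hw_state_sketch(struct drm_device *dev,
                                         u32 port_reg, u32 enable_bit,
                                         enum pipe *pipe)
    {
            struct drm_i915_private *dev_priv = dev->dev_private;
            u32 tmp = I915_READ(port_reg);

            if (!(tmp & enable_bit))
                    return false;

            /* CPT encodes the pipe-select bits differently than GMCH/IBX. */
            *pipe = HAS_PCH_CPT(dev) ? PORT_TO_PIPE_CPT(tmp) : PORT_TO_PIPE(tmp);
            return true;
    }
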
@@ -651,12 +670,64 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
651 POSTING_READ(intel_hdmi->sdvox_reg); 670 POSTING_READ(intel_hdmi->sdvox_reg);
652 } 671 }
653 672
654 if (mode != DRM_MODE_DPMS_ON) { 673 temp |= enable_bits;
655 temp &= ~enable_bits; 674
656 } else { 675 I915_WRITE(intel_hdmi->sdvox_reg, temp);
657 temp |= enable_bits; 676 POSTING_READ(intel_hdmi->sdvox_reg);
677
 678 /* HW workaround, need to write this twice for an issue that may
 679 * result in the first write getting masked.
680 */
681 if (HAS_PCH_SPLIT(dev)) {
682 I915_WRITE(intel_hdmi->sdvox_reg, temp);
683 POSTING_READ(intel_hdmi->sdvox_reg);
684 }
685}
686
687static void intel_disable_hdmi(struct intel_encoder *encoder)
688{
689 struct drm_device *dev = encoder->base.dev;
690 struct drm_i915_private *dev_priv = dev->dev_private;
691 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
692 u32 temp;
693 u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
694
695 temp = I915_READ(intel_hdmi->sdvox_reg);
696
697 /* HW workaround for IBX, we need to move the port to transcoder A
698 * before disabling it. */
699 if (HAS_PCH_IBX(dev)) {
700 struct drm_crtc *crtc = encoder->base.crtc;
701 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
702
703 if (temp & SDVO_PIPE_B_SELECT) {
704 temp &= ~SDVO_PIPE_B_SELECT;
705 I915_WRITE(intel_hdmi->sdvox_reg, temp);
706 POSTING_READ(intel_hdmi->sdvox_reg);
707
708 /* Again we need to write this twice. */
709 I915_WRITE(intel_hdmi->sdvox_reg, temp);
710 POSTING_READ(intel_hdmi->sdvox_reg);
711
712 /* Transcoder selection bits only update
713 * effectively on vblank. */
714 if (crtc)
715 intel_wait_for_vblank(dev, pipe);
716 else
717 msleep(50);
718 }
719 }
720
721 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
 722 * we do this anyway, as it proves more stable in testing.
723 */
724 if (HAS_PCH_SPLIT(dev)) {
725 I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
726 POSTING_READ(intel_hdmi->sdvox_reg);
658 } 727 }
659 728
729 temp &= ~enable_bits;
730
660 I915_WRITE(intel_hdmi->sdvox_reg, temp); 731 I915_WRITE(intel_hdmi->sdvox_reg, temp);
661 POSTING_READ(intel_hdmi->sdvox_reg); 732 POSTING_READ(intel_hdmi->sdvox_reg);
662 733
@@ -736,7 +807,6 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
736 drm_detect_hdmi_monitor(edid); 807 drm_detect_hdmi_monitor(edid);
737 intel_hdmi->has_audio = drm_detect_monitor_audio(edid); 808 intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
738 } 809 }
739 connector->display_info.raw_edid = NULL;
740 kfree(edid); 810 kfree(edid);
741 } 811 }
742 812
@@ -777,8 +847,6 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
777 if (edid) { 847 if (edid) {
778 if (edid->input & DRM_EDID_INPUT_DIGITAL) 848 if (edid->input & DRM_EDID_INPUT_DIGITAL)
779 has_audio = drm_detect_monitor_audio(edid); 849 has_audio = drm_detect_monitor_audio(edid);
780
781 connector->display_info.raw_edid = NULL;
782 kfree(edid); 850 kfree(edid);
783 } 851 }
784 852
@@ -832,9 +900,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
832done: 900done:
833 if (intel_hdmi->base.base.crtc) { 901 if (intel_hdmi->base.base.crtc) {
834 struct drm_crtc *crtc = intel_hdmi->base.base.crtc; 902 struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
835 drm_crtc_helper_set_mode(crtc, &crtc->mode, 903 intel_set_mode(crtc, &crtc->mode,
836 crtc->x, crtc->y, 904 crtc->x, crtc->y, crtc->fb);
837 crtc->fb);
838 } 905 }
839 906
840 return 0; 907 return 0;
@@ -848,23 +915,19 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
848} 915}
849 916
850static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = { 917static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
851 .dpms = intel_ddi_dpms,
852 .mode_fixup = intel_hdmi_mode_fixup, 918 .mode_fixup = intel_hdmi_mode_fixup,
853 .prepare = intel_encoder_prepare,
854 .mode_set = intel_ddi_mode_set, 919 .mode_set = intel_ddi_mode_set,
855 .commit = intel_encoder_commit, 920 .disable = intel_encoder_noop,
856}; 921};
857 922
858static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { 923static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
859 .dpms = intel_hdmi_dpms,
860 .mode_fixup = intel_hdmi_mode_fixup, 924 .mode_fixup = intel_hdmi_mode_fixup,
861 .prepare = intel_encoder_prepare,
862 .mode_set = intel_hdmi_mode_set, 925 .mode_set = intel_hdmi_mode_set,
863 .commit = intel_encoder_commit, 926 .disable = intel_encoder_noop,
864}; 927};
865 928
866static const struct drm_connector_funcs intel_hdmi_connector_funcs = { 929static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
867 .dpms = drm_helper_connector_dpms, 930 .dpms = intel_connector_dpms,
868 .detect = intel_hdmi_detect, 931 .detect = intel_hdmi_detect,
869 .fill_modes = drm_helper_probe_single_connector_modes, 932 .fill_modes = drm_helper_probe_single_connector_modes,
870 .set_property = intel_hdmi_set_property, 933 .set_property = intel_hdmi_set_property,
@@ -888,7 +951,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
888 intel_attach_broadcast_rgb_property(connector); 951 intel_attach_broadcast_rgb_property(connector);
889} 952}
890 953
891void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) 954void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
892{ 955{
893 struct drm_i915_private *dev_priv = dev->dev_private; 956 struct drm_i915_private *dev_priv = dev->dev_private;
894 struct drm_connector *connector; 957 struct drm_connector *connector;
@@ -922,48 +985,25 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
922 connector->doublescan_allowed = 0; 985 connector->doublescan_allowed = 0;
923 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 986 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
924 987
925 /* Set up the DDC bus. */ 988 intel_encoder->cloneable = false;
926 if (sdvox_reg == SDVOB) { 989
927 intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); 990 intel_hdmi->ddi_port = port;
928 intel_hdmi->ddc_bus = GMBUS_PORT_DPB; 991 switch (port) {
929 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 992 case PORT_B:
930 } else if (sdvox_reg == SDVOC) {
931 intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
932 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
933 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
934 } else if (sdvox_reg == HDMIB) {
935 intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
936 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
937 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
938 } else if (sdvox_reg == HDMIC) {
939 intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
940 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
941 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
942 } else if (sdvox_reg == HDMID) {
943 intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
944 intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
945 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
946 } else if (sdvox_reg == DDI_BUF_CTL(PORT_B)) {
947 DRM_DEBUG_DRIVER("LPT: detected output on DDI B\n");
948 intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
949 intel_hdmi->ddc_bus = GMBUS_PORT_DPB; 993 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
950 intel_hdmi->ddi_port = PORT_B;
951 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 994 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
952 } else if (sdvox_reg == DDI_BUF_CTL(PORT_C)) { 995 break;
953 DRM_DEBUG_DRIVER("LPT: detected output on DDI C\n"); 996 case PORT_C:
954 intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
955 intel_hdmi->ddc_bus = GMBUS_PORT_DPC; 997 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
956 intel_hdmi->ddi_port = PORT_C;
957 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 998 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
958 } else if (sdvox_reg == DDI_BUF_CTL(PORT_D)) { 999 break;
959 DRM_DEBUG_DRIVER("LPT: detected output on DDI D\n"); 1000 case PORT_D:
960 intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
961 intel_hdmi->ddc_bus = GMBUS_PORT_DPD; 1001 intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
962 intel_hdmi->ddi_port = PORT_D;
963 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; 1002 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
964 } else { 1003 break;
965 /* If we got an unknown sdvox_reg, things are pretty much broken 1004 case PORT_A:
966 * in a way that we should let the kernel know about it */ 1005 /* Internal port only for eDP. */
1006 default:
967 BUG(); 1007 BUG();
968 } 1008 }
969 1009
@@ -986,10 +1026,21 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
986 intel_hdmi->set_infoframes = cpt_set_infoframes; 1026 intel_hdmi->set_infoframes = cpt_set_infoframes;
987 } 1027 }
988 1028
989 if (IS_HASWELL(dev)) 1029 if (IS_HASWELL(dev)) {
990 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs_hsw); 1030 intel_encoder->enable = intel_enable_ddi;
991 else 1031 intel_encoder->disable = intel_disable_ddi;
992 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); 1032 intel_encoder->get_hw_state = intel_ddi_get_hw_state;
1033 drm_encoder_helper_add(&intel_encoder->base,
1034 &intel_hdmi_helper_funcs_hsw);
1035 } else {
1036 intel_encoder->enable = intel_enable_hdmi;
1037 intel_encoder->disable = intel_disable_hdmi;
1038 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
1039 drm_encoder_helper_add(&intel_encoder->base,
1040 &intel_hdmi_helper_funcs);
1041 }
1042 intel_connector->get_hw_state = intel_connector_get_hw_state;
1043
993 1044
994 intel_hdmi_add_properties(intel_hdmi, connector); 1045 intel_hdmi_add_properties(intel_hdmi, connector);
995 1046
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 8552be9f5db1..e3166df55daa 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -64,13 +64,40 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
64 struct intel_lvds, base); 64 struct intel_lvds, base);
65} 65}
66 66
67static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
68 enum pipe *pipe)
69{
70 struct drm_device *dev = encoder->base.dev;
71 struct drm_i915_private *dev_priv = dev->dev_private;
72 u32 lvds_reg, tmp;
73
74 if (HAS_PCH_SPLIT(dev)) {
75 lvds_reg = PCH_LVDS;
76 } else {
77 lvds_reg = LVDS;
78 }
79
80 tmp = I915_READ(lvds_reg);
81
82 if (!(tmp & LVDS_PORT_EN))
83 return false;
84
85 if (HAS_PCH_CPT(dev))
86 *pipe = PORT_TO_PIPE_CPT(tmp);
87 else
88 *pipe = PORT_TO_PIPE(tmp);
89
90 return true;
91}
92
67/** 93/**
68 * Sets the power state for the panel. 94 * Sets the power state for the panel.
69 */ 95 */
70static void intel_lvds_enable(struct intel_lvds *intel_lvds) 96static void intel_enable_lvds(struct intel_encoder *encoder)
71{ 97{
72 struct drm_device *dev = intel_lvds->base.base.dev; 98 struct drm_device *dev = encoder->base.dev;
73 struct intel_crtc *intel_crtc = to_intel_crtc(intel_lvds->base.base.crtc); 99 struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
100 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
74 struct drm_i915_private *dev_priv = dev->dev_private; 101 struct drm_i915_private *dev_priv = dev->dev_private;
75 u32 ctl_reg, lvds_reg, stat_reg; 102 u32 ctl_reg, lvds_reg, stat_reg;
76 103
@@ -110,9 +137,10 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
110 intel_panel_enable_backlight(dev, intel_crtc->pipe); 137 intel_panel_enable_backlight(dev, intel_crtc->pipe);
111} 138}
112 139
113static void intel_lvds_disable(struct intel_lvds *intel_lvds) 140static void intel_disable_lvds(struct intel_encoder *encoder)
114{ 141{
115 struct drm_device *dev = intel_lvds->base.base.dev; 142 struct drm_device *dev = encoder->base.dev;
143 struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
116 struct drm_i915_private *dev_priv = dev->dev_private; 144 struct drm_i915_private *dev_priv = dev->dev_private;
117 u32 ctl_reg, lvds_reg, stat_reg; 145 u32 ctl_reg, lvds_reg, stat_reg;
118 146
@@ -141,18 +169,6 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds)
141 POSTING_READ(lvds_reg); 169 POSTING_READ(lvds_reg);
142} 170}
143 171
144static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
145{
146 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
147
148 if (mode == DRM_MODE_DPMS_ON)
149 intel_lvds_enable(intel_lvds);
150 else
151 intel_lvds_disable(intel_lvds);
152
153 /* XXX: We never power down the LVDS pairs. */
154}
155
156static int intel_lvds_mode_valid(struct drm_connector *connector, 172static int intel_lvds_mode_valid(struct drm_connector *connector,
157 struct drm_display_mode *mode) 173 struct drm_display_mode *mode)
158{ 174{
@@ -233,9 +249,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
233{ 249{
234 struct drm_device *dev = encoder->dev; 250 struct drm_device *dev = encoder->dev;
235 struct drm_i915_private *dev_priv = dev->dev_private; 251 struct drm_i915_private *dev_priv = dev->dev_private;
236 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
237 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 252 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
238 struct intel_encoder *tmp_encoder; 253 struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
239 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 254 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
240 int pipe; 255 int pipe;
241 256
@@ -245,14 +260,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
245 return false; 260 return false;
246 } 261 }
247 262
248 /* Should never happen!! */ 263 if (intel_encoder_check_is_cloned(&intel_lvds->base))
249 for_each_encoder_on_crtc(dev, encoder->crtc, tmp_encoder) { 264 return false;
250 if (&tmp_encoder->base != encoder) {
251 DRM_ERROR("Can't enable LVDS and another "
252 "encoder on the same pipe\n");
253 return false;
254 }
255 }
256 265
257 /* 266 /*
258 * We have timings from the BIOS for the panel, put them in 267 * We have timings from the BIOS for the panel, put them in
@@ -404,23 +413,6 @@ out:
404 return true; 413 return true;
405} 414}
406 415
407static void intel_lvds_prepare(struct drm_encoder *encoder)
408{
409 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
410
411 intel_lvds_disable(intel_lvds);
412}
413
414static void intel_lvds_commit(struct drm_encoder *encoder)
415{
416 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
417
418 /* Always do a full power on as we do not know what state
419 * we were left in.
420 */
421 intel_lvds_enable(intel_lvds);
422}
423
424static void intel_lvds_mode_set(struct drm_encoder *encoder, 416static void intel_lvds_mode_set(struct drm_encoder *encoder,
425 struct drm_display_mode *mode, 417 struct drm_display_mode *mode,
426 struct drm_display_mode *adjusted_mode) 418 struct drm_display_mode *adjusted_mode)
@@ -534,7 +526,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
534 dev_priv->modeset_on_lid = 0; 526 dev_priv->modeset_on_lid = 0;
535 527
536 mutex_lock(&dev->mode_config.mutex); 528 mutex_lock(&dev->mode_config.mutex);
537 drm_helper_resume_force_mode(dev); 529 intel_modeset_check_state(dev);
538 mutex_unlock(&dev->mode_config.mutex); 530 mutex_unlock(&dev->mode_config.mutex);
539 531
540 return NOTIFY_OK; 532 return NOTIFY_OK;
@@ -586,8 +578,8 @@ static int intel_lvds_set_property(struct drm_connector *connector,
586 * If the CRTC is enabled, the display will be changed 578 * If the CRTC is enabled, the display will be changed
587 * according to the new panel fitting mode. 579 * according to the new panel fitting mode.
588 */ 580 */
589 drm_crtc_helper_set_mode(crtc, &crtc->mode, 581 intel_set_mode(crtc, &crtc->mode,
590 crtc->x, crtc->y, crtc->fb); 582 crtc->x, crtc->y, crtc->fb);
591 } 583 }
592 } 584 }
593 585
@@ -595,11 +587,9 @@ static int intel_lvds_set_property(struct drm_connector *connector,
595} 587}
596 588
597static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { 589static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
598 .dpms = intel_lvds_dpms,
599 .mode_fixup = intel_lvds_mode_fixup, 590 .mode_fixup = intel_lvds_mode_fixup,
600 .prepare = intel_lvds_prepare,
601 .mode_set = intel_lvds_mode_set, 591 .mode_set = intel_lvds_mode_set,
602 .commit = intel_lvds_commit, 592 .disable = intel_encoder_noop,
603}; 593};
604 594
605static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { 595static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -609,7 +599,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
609}; 599};
610 600
611static const struct drm_connector_funcs intel_lvds_connector_funcs = { 601static const struct drm_connector_funcs intel_lvds_connector_funcs = {
612 .dpms = drm_helper_connector_dpms, 602 .dpms = intel_connector_dpms,
613 .detect = intel_lvds_detect, 603 .detect = intel_lvds_detect,
614 .fill_modes = drm_helper_probe_single_connector_modes, 604 .fill_modes = drm_helper_probe_single_connector_modes,
615	.set_property = intel_lvds_set_property, 605	.set_property = intel_lvds_set_property,
@@ -971,10 +961,15 @@ bool intel_lvds_init(struct drm_device *dev)
971 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, 961 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
972 DRM_MODE_ENCODER_LVDS); 962 DRM_MODE_ENCODER_LVDS);
973 963
964 intel_encoder->enable = intel_enable_lvds;
965 intel_encoder->disable = intel_disable_lvds;
966 intel_encoder->get_hw_state = intel_lvds_get_hw_state;
967 intel_connector->get_hw_state = intel_connector_get_hw_state;
968
974 intel_connector_attach_encoder(intel_connector, intel_encoder); 969 intel_connector_attach_encoder(intel_connector, intel_encoder);
975 intel_encoder->type = INTEL_OUTPUT_LVDS; 970 intel_encoder->type = INTEL_OUTPUT_LVDS;
976 971
977 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); 972 intel_encoder->cloneable = false;
978 if (HAS_PCH_SPLIT(dev)) 973 if (HAS_PCH_SPLIT(dev))
979 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 974 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
980 else if (IS_GEN4(dev)) 975 else if (IS_GEN4(dev))
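These intel_lvds.c hunks retire the drm encoder-helper dpms/prepare/commit callbacks in favour of dedicated enable/disable/get_hw_state hooks on the encoder, which the driver's own modeset code can sequence and then cross-check against real hardware state (the intel_modeset_check_state() call in the lid-notify hunk above). A minimal standalone sketch of the hook pattern, using hypothetical names rather than the driver's actual types:

#include <stdbool.h>
#include <stdio.h>

struct encoder {
        void (*enable)(struct encoder *e);
        void (*disable)(struct encoder *e);
        bool (*get_hw_state)(struct encoder *e, int *pipe);
        bool powered;   /* stand-in for the real port register */
        int pipe;
};

static void lvds_enable(struct encoder *e)  { e->powered = true; }
static void lvds_disable(struct encoder *e) { e->powered = false; }

static bool lvds_get_hw_state(struct encoder *e, int *pipe)
{
        *pipe = e->pipe;        /* report which pipe drives the port */
        return e->powered;      /* read back; never trust cached dpms state */
}

int main(void)
{
        struct encoder e = { lvds_enable, lvds_disable, lvds_get_hw_state,
                             false, 0 };
        int pipe;

        e.enable(&e);
        printf("on=%d pipe=%d\n", e.get_hw_state(&e, &pipe), pipe);
        return 0;
}

Because every encoder can now report its own hardware state, per-connector dpms (intel_connector_dpms above) and the cloneable flag replace the old clone_mask and prepare/commit machinery.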
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index cc71fd9aaed5..cabd84bf66eb 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
45 drm_mode_connector_update_edid_property(connector, edid); 45 drm_mode_connector_update_edid_property(connector, edid);
46 ret = drm_add_edid_modes(connector, edid); 46 ret = drm_add_edid_modes(connector, edid);
47 drm_edid_to_eld(connector, edid); 47 drm_edid_to_eld(connector, edid);
48 connector->display_info.raw_edid = NULL;
49 kfree(edid); 48 kfree(edid);
50 49
51 return ret; 50 return ret;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 5cc624eb6133..5530413213d8 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -427,6 +427,25 @@ blind_set:
427 goto end; 427 goto end;
428} 428}
429 429
430static void intel_setup_cadls(struct drm_device *dev)
431{
432 struct drm_i915_private *dev_priv = dev->dev_private;
433 struct intel_opregion *opregion = &dev_priv->opregion;
434 int i = 0;
435 u32 disp_id;
436
437 /* Initialize the CADL field by duplicating the DIDL values.
438	 * Technically, this is not always correct, as display outputs may exist
439	 * but not be active. This initialization is necessary for some Clevo
440 * laptops that check this field before processing the brightness and
441 * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if
442	 * there are fewer than eight devices. */
443 do {
444 disp_id = ioread32(&opregion->acpi->didl[i]);
445 iowrite32(disp_id, &opregion->acpi->cadl[i]);
446 } while (++i < 8 && disp_id != 0);
447}
448
430void intel_opregion_init(struct drm_device *dev) 449void intel_opregion_init(struct drm_device *dev)
431{ 450{
432 struct drm_i915_private *dev_priv = dev->dev_private; 451 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -436,8 +455,10 @@ void intel_opregion_init(struct drm_device *dev)
436 return; 455 return;
437 456
438 if (opregion->acpi) { 457 if (opregion->acpi) {
439 if (drm_core_check_feature(dev, DRIVER_MODESET)) 458 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
440 intel_didl_outputs(dev); 459 intel_didl_outputs(dev);
460 intel_setup_cadls(dev);
461 }
441 462
442 /* Notify BIOS we are ready to handle ACPI video ext notifs. 463 /* Notify BIOS we are ready to handle ACPI video ext notifs.
443 * Right now, all the events are handled by the ACPI video module. 464 * Right now, all the events are handled by the ACPI video module.
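The intel_setup_cadls() addition seeds CADL by copying DIDL entries until the NULL terminator or the eight-entry limit, purely so that BIOSes which inspect CADL before handling brightness and display hotkeys find a populated field. A self-contained sketch of the same copy loop, with plain arrays standing in for the memory-mapped opregion (ioread32/iowrite32 reduced to assignments):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t didl[8] = { 0x400, 0x410, 0 };  /* NULL-terminated id list */
        uint32_t cadl[8] = { 0 };
        uint32_t disp_id;
        int i = 0;

        /* duplicate DIDL into CADL; like DIDL, CADL stays NULL-terminated
         * when fewer than eight devices are present */
        do {
                disp_id = didl[i];
                cadl[i] = disp_id;
        } while (++i < 8 && disp_id != 0);

        printf("copied %d entries (terminator included)\n", i);
        return 0;
}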
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 10510221d763..ebff850a9ab6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -234,54 +234,6 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
234 return 0; 234 return 0;
235} 235}
236 236
237/* Workaround for i830 bug where pipe A must be enabled to change control regs */
238static int
239i830_activate_pipe_a(struct drm_device *dev)
240{
241 drm_i915_private_t *dev_priv = dev->dev_private;
242 struct intel_crtc *crtc;
243 struct drm_crtc_helper_funcs *crtc_funcs;
244 struct drm_display_mode vesa_640x480 = {
245 DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
246 752, 800, 0, 480, 489, 492, 525, 0,
247 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
248 }, *mode;
249
250 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[0]);
251 if (crtc->dpms_mode == DRM_MODE_DPMS_ON)
252 return 0;
253
254 /* most i8xx have pipe a forced on, so don't trust dpms mode */
255 if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
256 return 0;
257
258 crtc_funcs = crtc->base.helper_private;
259 if (crtc_funcs->dpms == NULL)
260 return 0;
261
262 DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
263
264 mode = drm_mode_duplicate(dev, &vesa_640x480);
265
266 if (!drm_crtc_helper_set_mode(&crtc->base, mode,
267 crtc->base.x, crtc->base.y,
268 crtc->base.fb))
269 return 0;
270
271 crtc_funcs->dpms(&crtc->base, DRM_MODE_DPMS_ON);
272 return 1;
273}
274
275static void
276i830_deactivate_pipe_a(struct drm_device *dev)
277{
278 drm_i915_private_t *dev_priv = dev->dev_private;
279 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[0];
280 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
281
282 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
283}
284
285/* overlay needs to be disabled in OCMD reg */ 237/* overlay needs to be disabled in OCMD reg */
286static int intel_overlay_on(struct intel_overlay *overlay) 238static int intel_overlay_on(struct intel_overlay *overlay)
287{ 239{
@@ -289,17 +241,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
289 struct drm_i915_private *dev_priv = dev->dev_private; 241 struct drm_i915_private *dev_priv = dev->dev_private;
290 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 242 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
291 struct drm_i915_gem_request *request; 243 struct drm_i915_gem_request *request;
292 int pipe_a_quirk = 0;
293 int ret; 244 int ret;
294 245
295 BUG_ON(overlay->active); 246 BUG_ON(overlay->active);
296 overlay->active = 1; 247 overlay->active = 1;
297 248
298 if (IS_I830(dev)) { 249 WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
299 pipe_a_quirk = i830_activate_pipe_a(dev);
300 if (pipe_a_quirk < 0)
301 return pipe_a_quirk;
302 }
303 250
304 request = kzalloc(sizeof(*request), GFP_KERNEL); 251 request = kzalloc(sizeof(*request), GFP_KERNEL);
305 if (request == NULL) { 252 if (request == NULL) {
@@ -321,9 +268,6 @@ static int intel_overlay_on(struct intel_overlay *overlay)
321 268
322 ret = intel_overlay_do_wait_request(overlay, request, NULL); 269 ret = intel_overlay_do_wait_request(overlay, request, NULL);
323out: 270out:
324 if (pipe_a_quirk)
325 i830_deactivate_pipe_a(dev);
326
327 return ret; 271 return ret;
328} 272}
329 273
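With the pipe A enable/disable dance deleted, intel_overlay_on() relies on i830 running with QUIRK_PIPEA_FORCE, so pipe A can never be powered down behind the overlay's back and the old runtime workaround shrinks to an assertion on that invariant. The shape of the change, sketched with a hypothetical flag:

#include <assert.h>
#include <stdbool.h>

static bool quirk_pipea_force = true;   /* decided once at driver load */

static void overlay_on(bool is_i830)
{
        /* before: activate_pipe_a(); ...emit commands...; deactivate_pipe_a();
         * after:  the quirk guarantees the precondition, so only check it */
        if (is_i830)
                assert(quirk_pipea_force);
        /* ...queue the overlay-on request as in the hunk above... */
}

int main(void) { overlay_on(true); return 0; }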
@@ -1438,7 +1382,7 @@ void intel_setup_overlay(struct drm_device *dev)
1438 } 1382 }
1439 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; 1383 overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
1440 } else { 1384 } else {
1441 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); 1385 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
1442 if (ret) { 1386 if (ret) {
1443 DRM_ERROR("failed to pin overlay register bo\n"); 1387 DRM_ERROR("failed to pin overlay register bo\n");
1444 goto out_free_bo; 1388 goto out_free_bo;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ba8a27b1757a..d69f8f49beb5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -31,6 +31,8 @@
31#include "../../../platform/x86/intel_ips.h" 31#include "../../../platform/x86/intel_ips.h"
32#include <linux/module.h> 32#include <linux/module.h>
33 33
34#define FORCEWAKE_ACK_TIMEOUT_MS 2
35
34/* FBC, or Frame Buffer Compression, is a technique employed to compress the 36/* FBC, or Frame Buffer Compression, is a technique employed to compress the
35 * framebuffer contents in-memory, aiming at reducing the required bandwidth 37 * framebuffer contents in-memory, aiming at reducing the required bandwidth
36 * during in-memory transfers and, therefore, reduce the power consumption. 38 * during in-memory transfers and, therefore, reduce the power consumption.
@@ -593,7 +595,7 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
593 break; 595 break;
594 } 596 }
595 597
596 dev_priv->r_t = dev_priv->mem_freq; 598 dev_priv->ips.r_t = dev_priv->mem_freq;
597 599
598 switch (csipll & 0x3ff) { 600 switch (csipll & 0x3ff) {
599 case 0x00c: 601 case 0x00c:
@@ -625,11 +627,11 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
625 } 627 }
626 628
627 if (dev_priv->fsb_freq == 3200) { 629 if (dev_priv->fsb_freq == 3200) {
628 dev_priv->c_m = 0; 630 dev_priv->ips.c_m = 0;
629 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { 631 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
630 dev_priv->c_m = 1; 632 dev_priv->ips.c_m = 1;
631 } else { 633 } else {
632 dev_priv->c_m = 2; 634 dev_priv->ips.c_m = 2;
633 } 635 }
634} 636}
635 637
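The mechanical renames in the intel_pm.c hunks that follow (c_m, r_t, fmax, last_count1, ... becoming dev_priv->ips.*) gather the Ironlake IPS bookkeeping into one sub-struct, giving a single place to document what mchdev_lock protects. A sketch of the resulting layout; the struct name and field subset are illustrative, not the complete definition:

#include <stdint.h>

/* every field in here is guarded by mchdev_lock */
struct ilk_power_mgmt {
        uint8_t c_m, r_t;
        uint8_t fmax, fstart;
        uint8_t max_delay, min_delay, cur_delay;
        unsigned long last_count1, last_time1, last_count2;
        unsigned long chipset_power, gfx_power, corr;
};

struct i915_private_sketch {
        struct ilk_power_mgmt ips;      /* was: loose fields on dev_priv */
};

int main(void)
{
        struct i915_private_sketch p = { { .fmax = 0 } };
        return p.ips.fmax;
}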
@@ -2138,7 +2140,7 @@ intel_alloc_context_page(struct drm_device *dev)
2138 return NULL; 2140 return NULL;
2139 } 2141 }
2140 2142
2141 ret = i915_gem_object_pin(ctx, 4096, true); 2143 ret = i915_gem_object_pin(ctx, 4096, true, false);
2142 if (ret) { 2144 if (ret) {
2143 DRM_ERROR("failed to pin power context: %d\n", ret); 2145 DRM_ERROR("failed to pin power context: %d\n", ret);
2144 goto err_unref; 2146 goto err_unref;
@@ -2160,11 +2162,22 @@ err_unref:
2160 return NULL; 2162 return NULL;
2161} 2163}
2162 2164
2165/**
2166 * Lock protecting IPS related data structures
2167 */
2168DEFINE_SPINLOCK(mchdev_lock);
2169
2170/* Global for IPS driver to get at the current i915 device. Protected by
2171 * mchdev_lock. */
2172static struct drm_i915_private *i915_mch_dev;
2173
2163bool ironlake_set_drps(struct drm_device *dev, u8 val) 2174bool ironlake_set_drps(struct drm_device *dev, u8 val)
2164{ 2175{
2165 struct drm_i915_private *dev_priv = dev->dev_private; 2176 struct drm_i915_private *dev_priv = dev->dev_private;
2166 u16 rgvswctl; 2177 u16 rgvswctl;
2167 2178
2179 assert_spin_locked(&mchdev_lock);
2180
2168 rgvswctl = I915_READ16(MEMSWCTL); 2181 rgvswctl = I915_READ16(MEMSWCTL);
2169 if (rgvswctl & MEMCTL_CMD_STS) { 2182 if (rgvswctl & MEMCTL_CMD_STS) {
2170 DRM_DEBUG("gpu busy, RCS change rejected\n"); 2183 DRM_DEBUG("gpu busy, RCS change rejected\n");
@@ -2188,6 +2201,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
2188 u32 rgvmodectl = I915_READ(MEMMODECTL); 2201 u32 rgvmodectl = I915_READ(MEMMODECTL);
2189 u8 fmax, fmin, fstart, vstart; 2202 u8 fmax, fmin, fstart, vstart;
2190 2203
2204 spin_lock_irq(&mchdev_lock);
2205
2191 /* Enable temp reporting */ 2206 /* Enable temp reporting */
2192 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); 2207 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2193 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); 2208 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
@@ -2211,12 +2226,12 @@ static void ironlake_enable_drps(struct drm_device *dev)
2211 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> 2226 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2212 PXVFREQ_PX_SHIFT; 2227 PXVFREQ_PX_SHIFT;
2213 2228
2214 dev_priv->fmax = fmax; /* IPS callback will increase this */ 2229 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
2215 dev_priv->fstart = fstart; 2230 dev_priv->ips.fstart = fstart;
2216 2231
2217 dev_priv->max_delay = fstart; 2232 dev_priv->ips.max_delay = fstart;
2218 dev_priv->min_delay = fmin; 2233 dev_priv->ips.min_delay = fmin;
2219 dev_priv->cur_delay = fstart; 2234 dev_priv->ips.cur_delay = fstart;
2220 2235
2221 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", 2236 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2222 fmax, fmin, fstart); 2237 fmax, fmin, fstart);
@@ -2233,23 +2248,29 @@ static void ironlake_enable_drps(struct drm_device *dev)
2233 rgvmodectl |= MEMMODE_SWMODE_EN; 2248 rgvmodectl |= MEMMODE_SWMODE_EN;
2234 I915_WRITE(MEMMODECTL, rgvmodectl); 2249 I915_WRITE(MEMMODECTL, rgvmodectl);
2235 2250
2236 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) 2251 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2237 DRM_ERROR("stuck trying to change perf mode\n"); 2252 DRM_ERROR("stuck trying to change perf mode\n");
2238 msleep(1); 2253 mdelay(1);
2239 2254
2240 ironlake_set_drps(dev, fstart); 2255 ironlake_set_drps(dev, fstart);
2241 2256
2242 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + 2257 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2243 I915_READ(0x112e0); 2258 I915_READ(0x112e0);
2244 dev_priv->last_time1 = jiffies_to_msecs(jiffies); 2259 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
2245 dev_priv->last_count2 = I915_READ(0x112f4); 2260 dev_priv->ips.last_count2 = I915_READ(0x112f4);
2246 getrawmonotonic(&dev_priv->last_time2); 2261 getrawmonotonic(&dev_priv->ips.last_time2);
2262
2263 spin_unlock_irq(&mchdev_lock);
2247} 2264}
2248 2265
2249static void ironlake_disable_drps(struct drm_device *dev) 2266static void ironlake_disable_drps(struct drm_device *dev)
2250{ 2267{
2251 struct drm_i915_private *dev_priv = dev->dev_private; 2268 struct drm_i915_private *dev_priv = dev->dev_private;
2252 u16 rgvswctl = I915_READ16(MEMSWCTL); 2269 u16 rgvswctl;
2270
2271 spin_lock_irq(&mchdev_lock);
2272
2273 rgvswctl = I915_READ16(MEMSWCTL);
2253 2274
2254 /* Ack interrupts, disable EFC interrupt */ 2275 /* Ack interrupts, disable EFC interrupt */
2255 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); 2276 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
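The msleep() to mdelay() and wait_for() to wait_for_atomic() swaps in these DRPS hunks are forced by the new locking: the paths now run under spin_lock_irq(&mchdev_lock), where sleeping is forbidden, so each wait must busy-spin instead. A kernel-context sketch of the constraint (assumes the standard spinlock/delay headers, not standalone):

#include <linux/delay.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void locked_wait(void)
{
        spin_lock_irq(&example_lock);
        /* msleep() may schedule while the lock is held with interrupts
         * off, which would deadlock or oops; mdelay() spins and is safe */
        mdelay(1);
        spin_unlock_irq(&example_lock);
}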
@@ -2259,31 +2280,54 @@ static void ironlake_disable_drps(struct drm_device *dev)
2259 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 2280 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2260 2281
2261 /* Go back to the starting frequency */ 2282 /* Go back to the starting frequency */
2262 ironlake_set_drps(dev, dev_priv->fstart); 2283 ironlake_set_drps(dev, dev_priv->ips.fstart);
2263 msleep(1); 2284 mdelay(1);
2264 rgvswctl |= MEMCTL_CMD_STS; 2285 rgvswctl |= MEMCTL_CMD_STS;
2265 I915_WRITE(MEMSWCTL, rgvswctl); 2286 I915_WRITE(MEMSWCTL, rgvswctl);
2266 msleep(1); 2287 mdelay(1);
2267 2288
2289 spin_unlock_irq(&mchdev_lock);
2268} 2290}
2269 2291
2270void gen6_set_rps(struct drm_device *dev, u8 val) 2292/* There's a funny hw issue where the hw returns all 0 when reading from
2293 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
2294 * ourselves, instead of doing an rmw cycle (which might result in us clearing
2295 * all limits and leaving the gpu stuck at whatever frequency it is currently at).
2296 */
2297static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
2271{ 2298{
2272 struct drm_i915_private *dev_priv = dev->dev_private;
2273 u32 limits; 2299 u32 limits;
2274 2300
2275 limits = 0; 2301 limits = 0;
2276 if (val >= dev_priv->max_delay)
2277 val = dev_priv->max_delay;
2278 else
2279 limits |= dev_priv->max_delay << 24;
2280 2302
2281 if (val <= dev_priv->min_delay) 2303 if (*val >= dev_priv->rps.max_delay)
2282 val = dev_priv->min_delay; 2304 *val = dev_priv->rps.max_delay;
2283 else 2305 limits |= dev_priv->rps.max_delay << 24;
2284 limits |= dev_priv->min_delay << 16; 2306
2307 /* Only set the down limit when we've reached the lowest level to avoid
2308 * getting more interrupts, otherwise leave this clear. This prevents a
2309 * race in the hw when coming out of rc6: There's a tiny window where
2310 * the hw runs at the minimal clock before selecting the desired
2311 * frequency; if the down threshold expires in that window we will not
2312 * receive a down interrupt. */
2313 if (*val <= dev_priv->rps.min_delay) {
2314 *val = dev_priv->rps.min_delay;
2315 limits |= dev_priv->rps.min_delay << 16;
2316 }
2285 2317
2286 if (val == dev_priv->cur_delay) 2318 return limits;
2319}
2320
2321void gen6_set_rps(struct drm_device *dev, u8 val)
2322{
2323 struct drm_i915_private *dev_priv = dev->dev_private;
2324 u32 limits = gen6_rps_limits(dev_priv, &val);
2325
2326 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2327 WARN_ON(val > dev_priv->rps.max_delay);
2328 WARN_ON(val < dev_priv->rps.min_delay);
2329
2330 if (val == dev_priv->rps.cur_delay)
2287 return; 2331 return;
2288 2332
2289 I915_WRITE(GEN6_RPNSWREQ, 2333 I915_WRITE(GEN6_RPNSWREQ,
@@ -2296,7 +2340,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
2296 */ 2340 */
2297 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); 2341 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
2298 2342
2299 dev_priv->cur_delay = val; 2343 POSTING_READ(GEN6_RPNSWREQ);
2344
2345 dev_priv->rps.cur_delay = val;
2346
2347 trace_intel_gpu_freq_change(val * 50);
2300} 2348}
2301 2349
2302static void gen6_disable_rps(struct drm_device *dev) 2350static void gen6_disable_rps(struct drm_device *dev)
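gen6_rps_limits() clamps the requested frequency and rebuilds the GEN6_RP_INTERRUPT_LIMITS word from scratch, since reading that register back can return all zeroes. A standalone sketch of the clamp-and-encode logic, using the same <<24 (up limit) and <<16 (down limit) packing as the hunk:

#include <stdint.h>
#include <stdio.h>

static uint32_t rps_limits(uint8_t min_delay, uint8_t max_delay, uint8_t *val)
{
        uint32_t limits = 0;

        if (*val >= max_delay)
                *val = max_delay;
        limits |= (uint32_t)max_delay << 24;    /* up limit: always set */

        /* set the down limit only at the floor, so down interrupts keep
         * arriving while we are above the minimum */
        if (*val <= min_delay) {
                *val = min_delay;
                limits |= (uint32_t)min_delay << 16;
        }
        return limits;
}

int main(void)
{
        uint8_t val = 3;        /* below the floor: gets clamped up */
        uint32_t limits = rps_limits(5, 20, &val);

        printf("limits=%08x val=%u\n", (unsigned)limits, val);  /* 14050000 5 */
        return 0;
}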
@@ -2312,40 +2360,40 @@ static void gen6_disable_rps(struct drm_device *dev)
2312 * register (PMIMR) to mask PM interrupts. The only risk is in leaving 2360 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
2313 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ 2361 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
2314 2362
2315 spin_lock_irq(&dev_priv->rps_lock); 2363 spin_lock_irq(&dev_priv->rps.lock);
2316 dev_priv->pm_iir = 0; 2364 dev_priv->rps.pm_iir = 0;
2317 spin_unlock_irq(&dev_priv->rps_lock); 2365 spin_unlock_irq(&dev_priv->rps.lock);
2318 2366
2319 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2367 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2320} 2368}
2321 2369
2322int intel_enable_rc6(const struct drm_device *dev) 2370int intel_enable_rc6(const struct drm_device *dev)
2323{ 2371{
2324 /* 2372 /* Respect the kernel parameter if it is set */
2325 * Respect the kernel parameter if it is set
2326 */
2327 if (i915_enable_rc6 >= 0) 2373 if (i915_enable_rc6 >= 0)
2328 return i915_enable_rc6; 2374 return i915_enable_rc6;
2329 2375
2330 /* 2376 if (INTEL_INFO(dev)->gen == 5) {
2331 * Disable RC6 on Ironlake 2377#ifdef CONFIG_INTEL_IOMMU
2332 */ 2378 /* Disable rc6 on ilk if VT-d is on. */
2333 if (INTEL_INFO(dev)->gen == 5) 2379 if (intel_iommu_gfx_mapped)
2334 return 0; 2380 return false;
2381#endif
2382 DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
2383 return INTEL_RC6_ENABLE;
2384 }
2335 2385
2336 /* On Haswell, only RC6 is available. So let's enable it by default to 2386 if (IS_HASWELL(dev)) {
2337 * provide better testing and coverage since the beginning. 2387 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
2338 */
2339 if (IS_HASWELL(dev))
2340 return INTEL_RC6_ENABLE; 2388 return INTEL_RC6_ENABLE;
2389 }
2341 2390
2342 /* 2391 /* snb/ivb have more than one rc6 state. */
2343 * Disable rc6 on Sandybridge
2344 */
2345 if (INTEL_INFO(dev)->gen == 6) { 2392 if (INTEL_INFO(dev)->gen == 6) {
2346 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n"); 2393 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
2347 return INTEL_RC6_ENABLE; 2394 return INTEL_RC6_ENABLE;
2348 } 2395 }
2396
2349 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); 2397 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
2350 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 2398 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
2351} 2399}
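The rewritten intel_enable_rc6() is a straight decision ladder: the module parameter wins outright, Ironlake gets plain RC6 unless VT-d is active, Haswell and Sandybridge get plain RC6 only, and everything newer also gets deep RC6. A condensed sketch (the bit values mirror INTEL_RC6_ENABLE/INTEL_RC6p_ENABLE but are hypothetical here):

#include <stdio.h>

#define RC6_ENABLE  (1 << 0)
#define RC6P_ENABLE (1 << 1)

static int rc6_mask(int param, int gen, int is_haswell, int iommu_on)
{
        if (param >= 0)
                return param;                     /* kernel parameter wins */
        if (gen == 5)
                return iommu_on ? 0 : RC6_ENABLE; /* ilk: no rc6 with VT-d */
        if (is_haswell || gen == 6)
                return RC6_ENABLE;                /* plain rc6 only */
        return RC6_ENABLE | RC6P_ENABLE;          /* ivb+: deep rc6 too */
}

int main(void)
{
        printf("snb mask=%d ivb mask=%d\n",
               rc6_mask(-1, 6, 0, 0), rc6_mask(-1, 7, 0, 0));
        return 0;
}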
@@ -2383,9 +2431,9 @@ static void gen6_enable_rps(struct drm_device *dev)
2383 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 2431 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
2384 2432
2385 /* In units of 100MHz */ 2433 /* In units of 100MHz */
2386 dev_priv->max_delay = rp_state_cap & 0xff; 2434 dev_priv->rps.max_delay = rp_state_cap & 0xff;
2387 dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16; 2435 dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
2388 dev_priv->cur_delay = 0; 2436 dev_priv->rps.cur_delay = 0;
2389 2437
2390 /* disable the counters and set deterministic thresholds */ 2438 /* disable the counters and set deterministic thresholds */
2391 I915_WRITE(GEN6_RC_CONTROL, 0); 2439 I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2438,8 +2486,8 @@ static void gen6_enable_rps(struct drm_device *dev)
2438 2486
2439 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 2487 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2440 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 2488 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
2441 dev_priv->max_delay << 24 | 2489 dev_priv->rps.max_delay << 24 |
2442 dev_priv->min_delay << 16); 2490 dev_priv->rps.min_delay << 16);
2443 2491
2444 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 2492 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
2445 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 2493 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
@@ -2477,7 +2525,7 @@ static void gen6_enable_rps(struct drm_device *dev)
2477 500)) 2525 500))
2478 DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); 2526 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2479 if (pcu_mbox & (1<<31)) { /* OC supported */ 2527 if (pcu_mbox & (1<<31)) { /* OC supported */
2480 dev_priv->max_delay = pcu_mbox & 0xff; 2528 dev_priv->rps.max_delay = pcu_mbox & 0xff;
2481 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); 2529 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
2482 } 2530 }
2483 2531
@@ -2485,10 +2533,10 @@ static void gen6_enable_rps(struct drm_device *dev)
2485 2533
2486 /* requires MSI enabled */ 2534 /* requires MSI enabled */
2487 I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS); 2535 I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
2488 spin_lock_irq(&dev_priv->rps_lock); 2536 spin_lock_irq(&dev_priv->rps.lock);
2489 WARN_ON(dev_priv->pm_iir != 0); 2537 WARN_ON(dev_priv->rps.pm_iir != 0);
2490 I915_WRITE(GEN6_PMIMR, 0); 2538 I915_WRITE(GEN6_PMIMR, 0);
2491 spin_unlock_irq(&dev_priv->rps_lock); 2539 spin_unlock_irq(&dev_priv->rps.lock);
2492 /* enable all PM interrupts */ 2540 /* enable all PM interrupts */
2493 I915_WRITE(GEN6_PMINTRMSK, 0); 2541 I915_WRITE(GEN6_PMINTRMSK, 0);
2494 2542
@@ -2520,9 +2568,9 @@ static void gen6_update_ring_freq(struct drm_device *dev)
2520 * to use for memory access. We do this by specifying the IA frequency 2568 * to use for memory access. We do this by specifying the IA frequency
2521 * the PCU should use as a reference to determine the ring frequency. 2569 * the PCU should use as a reference to determine the ring frequency.
2522 */ 2570 */
2523 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; 2571 for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
2524 gpu_freq--) { 2572 gpu_freq--) {
2525 int diff = dev_priv->max_delay - gpu_freq; 2573 int diff = dev_priv->rps.max_delay - gpu_freq;
2526 2574
2527 /* 2575 /*
2528 * For GPU frequencies less than 750MHz, just use the lowest 2576 * For GPU frequencies less than 750MHz, just use the lowest
@@ -2686,14 +2734,16 @@ static const struct cparams {
2686 { 0, 800, 231, 23784 }, 2734 { 0, 800, 231, 23784 },
2687}; 2735};
2688 2736
2689unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 2737static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
2690{ 2738{
2691 u64 total_count, diff, ret; 2739 u64 total_count, diff, ret;
2692 u32 count1, count2, count3, m = 0, c = 0; 2740 u32 count1, count2, count3, m = 0, c = 0;
2693 unsigned long now = jiffies_to_msecs(jiffies), diff1; 2741 unsigned long now = jiffies_to_msecs(jiffies), diff1;
2694 int i; 2742 int i;
2695 2743
2696 diff1 = now - dev_priv->last_time1; 2744 assert_spin_locked(&mchdev_lock);
2745
2746 diff1 = now - dev_priv->ips.last_time1;
2697 2747
2698 /* Prevent division-by-zero if we are asking too fast. 2748 /* Prevent division-by-zero if we are asking too fast.
2699 * Also, we don't get interesting results if we are polling 2749 * Also, we don't get interesting results if we are polling
@@ -2701,7 +2751,7 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2701 * in such cases. 2751 * in such cases.
2702 */ 2752 */
2703 if (diff1 <= 10) 2753 if (diff1 <= 10)
2704 return dev_priv->chipset_power; 2754 return dev_priv->ips.chipset_power;
2705 2755
2706 count1 = I915_READ(DMIEC); 2756 count1 = I915_READ(DMIEC);
2707 count2 = I915_READ(DDREC); 2757 count2 = I915_READ(DDREC);
@@ -2710,16 +2760,16 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2710 total_count = count1 + count2 + count3; 2760 total_count = count1 + count2 + count3;
2711 2761
2712 /* FIXME: handle per-counter overflow */ 2762 /* FIXME: handle per-counter overflow */
2713 if (total_count < dev_priv->last_count1) { 2763 if (total_count < dev_priv->ips.last_count1) {
2714 diff = ~0UL - dev_priv->last_count1; 2764 diff = ~0UL - dev_priv->ips.last_count1;
2715 diff += total_count; 2765 diff += total_count;
2716 } else { 2766 } else {
2717 diff = total_count - dev_priv->last_count1; 2767 diff = total_count - dev_priv->ips.last_count1;
2718 } 2768 }
2719 2769
2720 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 2770 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
2721 if (cparams[i].i == dev_priv->c_m && 2771 if (cparams[i].i == dev_priv->ips.c_m &&
2722 cparams[i].t == dev_priv->r_t) { 2772 cparams[i].t == dev_priv->ips.r_t) {
2723 m = cparams[i].m; 2773 m = cparams[i].m;
2724 c = cparams[i].c; 2774 c = cparams[i].c;
2725 break; 2775 break;
@@ -2730,14 +2780,30 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2730 ret = ((m * diff) + c); 2780 ret = ((m * diff) + c);
2731 ret = div_u64(ret, 10); 2781 ret = div_u64(ret, 10);
2732 2782
2733 dev_priv->last_count1 = total_count; 2783 dev_priv->ips.last_count1 = total_count;
2734 dev_priv->last_time1 = now; 2784 dev_priv->ips.last_time1 = now;
2735 2785
2736 dev_priv->chipset_power = ret; 2786 dev_priv->ips.chipset_power = ret;
2737 2787
2738 return ret; 2788 return ret;
2739} 2789}
2740 2790
2791unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2792{
2793 unsigned long val;
2794
2795 if (dev_priv->info->gen != 5)
2796 return 0;
2797
2798 spin_lock_irq(&mchdev_lock);
2799
2800 val = __i915_chipset_val(dev_priv);
2801
2802 spin_unlock_irq(&mchdev_lock);
2803
2804 return val;
2805}
2806
2741unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 2807unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
2742{ 2808{
2743 unsigned long m, x, b; 2809 unsigned long m, x, b;
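The split above is the common locked-wrapper idiom: __i915_chipset_val() asserts that mchdev_lock is held and does the work, while i915_chipset_val() takes the lock for external callers; i915_read_mch_val() further down can then take the lock once around several __-helpers without recursing. A generic sketch of the idiom with pthreads:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mchdev_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long chipset_power;

/* caller must hold mchdev_lock */
static unsigned long __chipset_val(void)
{
        chipset_power += 7;     /* stand-in for the real sampling math */
        return chipset_power;
}

/* locked entry point for callers that hold nothing */
static unsigned long chipset_val(void)
{
        unsigned long v;

        pthread_mutex_lock(&mchdev_lock);
        v = __chipset_val();
        pthread_mutex_unlock(&mchdev_lock);
        return v;
}

int main(void)
{
        printf("%lu\n", chipset_val());
        return 0;
}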
@@ -2894,18 +2960,17 @@ static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
2894 return v_table[pxvid].vd; 2960 return v_table[pxvid].vd;
2895} 2961}
2896 2962
2897void i915_update_gfx_val(struct drm_i915_private *dev_priv) 2963static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
2898{ 2964{
2899 struct timespec now, diff1; 2965 struct timespec now, diff1;
2900 u64 diff; 2966 u64 diff;
2901 unsigned long diffms; 2967 unsigned long diffms;
2902 u32 count; 2968 u32 count;
2903 2969
2904 if (dev_priv->info->gen != 5) 2970 assert_spin_locked(&mchdev_lock);
2905 return;
2906 2971
2907 getrawmonotonic(&now); 2972 getrawmonotonic(&now);
2908 diff1 = timespec_sub(now, dev_priv->last_time2); 2973 diff1 = timespec_sub(now, dev_priv->ips.last_time2);
2909 2974
2910 /* Don't divide by 0 */ 2975 /* Don't divide by 0 */
2911 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; 2976 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
@@ -2914,28 +2979,42 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
2914 2979
2915 count = I915_READ(GFXEC); 2980 count = I915_READ(GFXEC);
2916 2981
2917 if (count < dev_priv->last_count2) { 2982 if (count < dev_priv->ips.last_count2) {
2918 diff = ~0UL - dev_priv->last_count2; 2983 diff = ~0UL - dev_priv->ips.last_count2;
2919 diff += count; 2984 diff += count;
2920 } else { 2985 } else {
2921 diff = count - dev_priv->last_count2; 2986 diff = count - dev_priv->ips.last_count2;
2922 } 2987 }
2923 2988
2924 dev_priv->last_count2 = count; 2989 dev_priv->ips.last_count2 = count;
2925 dev_priv->last_time2 = now; 2990 dev_priv->ips.last_time2 = now;
2926 2991
2927 /* More magic constants... */ 2992 /* More magic constants... */
2928 diff = diff * 1181; 2993 diff = diff * 1181;
2929 diff = div_u64(diff, diffms * 10); 2994 diff = div_u64(diff, diffms * 10);
2930 dev_priv->gfx_power = diff; 2995 dev_priv->ips.gfx_power = diff;
2931} 2996}
2932 2997
2933unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 2998void i915_update_gfx_val(struct drm_i915_private *dev_priv)
2999{
3000 if (dev_priv->info->gen != 5)
3001 return;
3002
3003 spin_lock_irq(&mchdev_lock);
3004
3005 __i915_update_gfx_val(dev_priv);
3006
3007 spin_unlock_irq(&mchdev_lock);
3008}
3009
3010static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
2934{ 3011{
2935 unsigned long t, corr, state1, corr2, state2; 3012 unsigned long t, corr, state1, corr2, state2;
2936 u32 pxvid, ext_v; 3013 u32 pxvid, ext_v;
2937 3014
2938 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); 3015 assert_spin_locked(&mchdev_lock);
3016
3017 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
2939 pxvid = (pxvid >> 24) & 0x7f; 3018 pxvid = (pxvid >> 24) & 0x7f;
2940 ext_v = pvid_to_extvid(dev_priv, pxvid); 3019 ext_v = pvid_to_extvid(dev_priv, pxvid);
2941 3020
@@ -2955,27 +3034,31 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
2955 3034
2956 corr = corr * ((150142 * state1) / 10000 - 78642); 3035 corr = corr * ((150142 * state1) / 10000 - 78642);
2957 corr /= 100000; 3036 corr /= 100000;
2958 corr2 = (corr * dev_priv->corr); 3037 corr2 = (corr * dev_priv->ips.corr);
2959 3038
2960 state2 = (corr2 * state1) / 10000; 3039 state2 = (corr2 * state1) / 10000;
2961 state2 /= 100; /* convert to mW */ 3040 state2 /= 100; /* convert to mW */
2962 3041
2963 i915_update_gfx_val(dev_priv); 3042 __i915_update_gfx_val(dev_priv);
2964 3043
2965 return dev_priv->gfx_power + state2; 3044 return dev_priv->ips.gfx_power + state2;
2966} 3045}
2967 3046
2968/* Global for IPS driver to get at the current i915 device */ 3047unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
2969static struct drm_i915_private *i915_mch_dev; 3048{
2970/* 3049 unsigned long val;
2971 * Lock protecting IPS related data structures 3050
2972 * - i915_mch_dev 3051 if (dev_priv->info->gen != 5)
2973 * - dev_priv->max_delay 3052 return 0;
2974 * - dev_priv->min_delay 3053
2975 * - dev_priv->fmax 3054 spin_lock_irq(&mchdev_lock);
2976 * - dev_priv->gpu_busy 3055
2977 */ 3056 val = __i915_gfx_val(dev_priv);
2978static DEFINE_SPINLOCK(mchdev_lock); 3057
3058 spin_unlock_irq(&mchdev_lock);
3059
3060 return val;
3061}
2979 3062
2980/** 3063/**
2981 * i915_read_mch_val - return value for IPS use 3064 * i915_read_mch_val - return value for IPS use
@@ -2988,18 +3071,18 @@ unsigned long i915_read_mch_val(void)
2988 struct drm_i915_private *dev_priv; 3071 struct drm_i915_private *dev_priv;
2989 unsigned long chipset_val, graphics_val, ret = 0; 3072 unsigned long chipset_val, graphics_val, ret = 0;
2990 3073
2991 spin_lock(&mchdev_lock); 3074 spin_lock_irq(&mchdev_lock);
2992 if (!i915_mch_dev) 3075 if (!i915_mch_dev)
2993 goto out_unlock; 3076 goto out_unlock;
2994 dev_priv = i915_mch_dev; 3077 dev_priv = i915_mch_dev;
2995 3078
2996 chipset_val = i915_chipset_val(dev_priv); 3079 chipset_val = __i915_chipset_val(dev_priv);
2997 graphics_val = i915_gfx_val(dev_priv); 3080 graphics_val = __i915_gfx_val(dev_priv);
2998 3081
2999 ret = chipset_val + graphics_val; 3082 ret = chipset_val + graphics_val;
3000 3083
3001out_unlock: 3084out_unlock:
3002 spin_unlock(&mchdev_lock); 3085 spin_unlock_irq(&mchdev_lock);
3003 3086
3004 return ret; 3087 return ret;
3005} 3088}
@@ -3015,18 +3098,18 @@ bool i915_gpu_raise(void)
3015 struct drm_i915_private *dev_priv; 3098 struct drm_i915_private *dev_priv;
3016 bool ret = true; 3099 bool ret = true;
3017 3100
3018 spin_lock(&mchdev_lock); 3101 spin_lock_irq(&mchdev_lock);
3019 if (!i915_mch_dev) { 3102 if (!i915_mch_dev) {
3020 ret = false; 3103 ret = false;
3021 goto out_unlock; 3104 goto out_unlock;
3022 } 3105 }
3023 dev_priv = i915_mch_dev; 3106 dev_priv = i915_mch_dev;
3024 3107
3025 if (dev_priv->max_delay > dev_priv->fmax) 3108 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
3026 dev_priv->max_delay--; 3109 dev_priv->ips.max_delay--;
3027 3110
3028out_unlock: 3111out_unlock:
3029 spin_unlock(&mchdev_lock); 3112 spin_unlock_irq(&mchdev_lock);
3030 3113
3031 return ret; 3114 return ret;
3032} 3115}
@@ -3043,18 +3126,18 @@ bool i915_gpu_lower(void)
3043 struct drm_i915_private *dev_priv; 3126 struct drm_i915_private *dev_priv;
3044 bool ret = true; 3127 bool ret = true;
3045 3128
3046 spin_lock(&mchdev_lock); 3129 spin_lock_irq(&mchdev_lock);
3047 if (!i915_mch_dev) { 3130 if (!i915_mch_dev) {
3048 ret = false; 3131 ret = false;
3049 goto out_unlock; 3132 goto out_unlock;
3050 } 3133 }
3051 dev_priv = i915_mch_dev; 3134 dev_priv = i915_mch_dev;
3052 3135
3053 if (dev_priv->max_delay < dev_priv->min_delay) 3136 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
3054 dev_priv->max_delay++; 3137 dev_priv->ips.max_delay++;
3055 3138
3056out_unlock: 3139out_unlock:
3057 spin_unlock(&mchdev_lock); 3140 spin_unlock_irq(&mchdev_lock);
3058 3141
3059 return ret; 3142 return ret;
3060} 3143}
@@ -3068,17 +3151,20 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
3068bool i915_gpu_busy(void) 3151bool i915_gpu_busy(void)
3069{ 3152{
3070 struct drm_i915_private *dev_priv; 3153 struct drm_i915_private *dev_priv;
3154 struct intel_ring_buffer *ring;
3071 bool ret = false; 3155 bool ret = false;
3156 int i;
3072 3157
3073 spin_lock(&mchdev_lock); 3158 spin_lock_irq(&mchdev_lock);
3074 if (!i915_mch_dev) 3159 if (!i915_mch_dev)
3075 goto out_unlock; 3160 goto out_unlock;
3076 dev_priv = i915_mch_dev; 3161 dev_priv = i915_mch_dev;
3077 3162
3078 ret = dev_priv->busy; 3163 for_each_ring(ring, dev_priv, i)
3164 ret |= !list_empty(&ring->request_list);
3079 3165
3080out_unlock: 3166out_unlock:
3081 spin_unlock(&mchdev_lock); 3167 spin_unlock_irq(&mchdev_lock);
3082 3168
3083 return ret; 3169 return ret;
3084} 3170}
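i915_gpu_busy() now derives busyness from whether any ring still has outstanding requests, rather than trusting a cached dev_priv->busy flag. A sketch of the replacement logic, with the request list reduced to a counter:

#include <stdbool.h>
#include <stdio.h>

struct ring { int pending_requests; };

static bool gpu_busy(const struct ring *rings, int nrings)
{
        bool ret = false;
        int i;

        /* busy if any ring's request list is non-empty */
        for (i = 0; i < nrings; i++)
                ret |= rings[i].pending_requests != 0;
        return ret;
}

int main(void)
{
        struct ring rings[3] = { { 0 }, { 2 }, { 0 } };

        printf("busy=%d\n", gpu_busy(rings, 3));
        return 0;
}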
@@ -3095,20 +3181,20 @@ bool i915_gpu_turbo_disable(void)
3095 struct drm_i915_private *dev_priv; 3181 struct drm_i915_private *dev_priv;
3096 bool ret = true; 3182 bool ret = true;
3097 3183
3098 spin_lock(&mchdev_lock); 3184 spin_lock_irq(&mchdev_lock);
3099 if (!i915_mch_dev) { 3185 if (!i915_mch_dev) {
3100 ret = false; 3186 ret = false;
3101 goto out_unlock; 3187 goto out_unlock;
3102 } 3188 }
3103 dev_priv = i915_mch_dev; 3189 dev_priv = i915_mch_dev;
3104 3190
3105 dev_priv->max_delay = dev_priv->fstart; 3191 dev_priv->ips.max_delay = dev_priv->ips.fstart;
3106 3192
3107 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) 3193 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
3108 ret = false; 3194 ret = false;
3109 3195
3110out_unlock: 3196out_unlock:
3111 spin_unlock(&mchdev_lock); 3197 spin_unlock_irq(&mchdev_lock);
3112 3198
3113 return ret; 3199 return ret;
3114} 3200}
@@ -3136,19 +3222,20 @@ ips_ping_for_i915_load(void)
3136 3222
3137void intel_gpu_ips_init(struct drm_i915_private *dev_priv) 3223void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
3138{ 3224{
3139 spin_lock(&mchdev_lock); 3225 /* We only register the i915 ips part with intel-ips once everything is
3226 * set up, to avoid intel-ips sneaking in and reading bogus values. */
3227 spin_lock_irq(&mchdev_lock);
3140 i915_mch_dev = dev_priv; 3228 i915_mch_dev = dev_priv;
3141 dev_priv->mchdev_lock = &mchdev_lock; 3229 spin_unlock_irq(&mchdev_lock);
3142 spin_unlock(&mchdev_lock);
3143 3230
3144 ips_ping_for_i915_load(); 3231 ips_ping_for_i915_load();
3145} 3232}
3146 3233
3147void intel_gpu_ips_teardown(void) 3234void intel_gpu_ips_teardown(void)
3148{ 3235{
3149 spin_lock(&mchdev_lock); 3236 spin_lock_irq(&mchdev_lock);
3150 i915_mch_dev = NULL; 3237 i915_mch_dev = NULL;
3151 spin_unlock(&mchdev_lock); 3238 spin_unlock_irq(&mchdev_lock);
3152} 3239}
3153static void intel_init_emon(struct drm_device *dev) 3240static void intel_init_emon(struct drm_device *dev)
3154{ 3241{
@@ -3218,7 +3305,7 @@ static void intel_init_emon(struct drm_device *dev)
3218 3305
3219 lcfuse = I915_READ(LCFUSE02); 3306 lcfuse = I915_READ(LCFUSE02);
3220 3307
3221 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); 3308 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
3222} 3309}
3223 3310
3224void intel_disable_gt_powersave(struct drm_device *dev) 3311void intel_disable_gt_powersave(struct drm_device *dev)
@@ -3731,42 +3818,6 @@ void intel_init_clock_gating(struct drm_device *dev)
3731 dev_priv->display.init_pch_clock_gating(dev); 3818 dev_priv->display.init_pch_clock_gating(dev);
3732} 3819}
3733 3820
3734static void gen6_sanitize_pm(struct drm_device *dev)
3735{
3736 struct drm_i915_private *dev_priv = dev->dev_private;
3737 u32 limits, delay, old;
3738
3739 gen6_gt_force_wake_get(dev_priv);
3740
3741 old = limits = I915_READ(GEN6_RP_INTERRUPT_LIMITS);
3742 /* Make sure we continue to get interrupts
3743 * until we hit the minimum or maximum frequencies.
3744 */
3745 limits &= ~(0x3f << 16 | 0x3f << 24);
3746 delay = dev_priv->cur_delay;
3747 if (delay < dev_priv->max_delay)
3748 limits |= (dev_priv->max_delay & 0x3f) << 24;
3749 if (delay > dev_priv->min_delay)
3750 limits |= (dev_priv->min_delay & 0x3f) << 16;
3751
3752 if (old != limits) {
3753 /* Note that the known failure case is to read back 0. */
3754 DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
3755 "expected %08x, was %08x\n", limits, old);
3756 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
3757 }
3758
3759 gen6_gt_force_wake_put(dev_priv);
3760}
3761
3762void intel_sanitize_pm(struct drm_device *dev)
3763{
3764 struct drm_i915_private *dev_priv = dev->dev_private;
3765
3766 if (dev_priv->display.sanitize_pm)
3767 dev_priv->display.sanitize_pm(dev);
3768}
3769
3770/* Starting with Haswell, we have different power wells for 3821/* Starting with Haswell, we have different power wells for
3771 * different parts of the GPU. This attempts to enable them all. 3822 * different parts of the GPU. This attempts to enable them all.
3772 */ 3823 */
@@ -3852,7 +3903,6 @@ void intel_init_pm(struct drm_device *dev)
3852 dev_priv->display.update_wm = NULL; 3903 dev_priv->display.update_wm = NULL;
3853 } 3904 }
3854 dev_priv->display.init_clock_gating = gen6_init_clock_gating; 3905 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
3855 dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3856 } else if (IS_IVYBRIDGE(dev)) { 3906 } else if (IS_IVYBRIDGE(dev)) {
3857 /* FIXME: detect B0+ stepping and use auto training */ 3907 /* FIXME: detect B0+ stepping and use auto training */
3858 if (SNB_READ_WM0_LATENCY()) { 3908 if (SNB_READ_WM0_LATENCY()) {
@@ -3864,7 +3914,6 @@ void intel_init_pm(struct drm_device *dev)
3864 dev_priv->display.update_wm = NULL; 3914 dev_priv->display.update_wm = NULL;
3865 } 3915 }
3866 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 3916 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
3867 dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3868 } else if (IS_HASWELL(dev)) { 3917 } else if (IS_HASWELL(dev)) {
3869 if (SNB_READ_WM0_LATENCY()) { 3918 if (SNB_READ_WM0_LATENCY()) {
3870 dev_priv->display.update_wm = sandybridge_update_wm; 3919 dev_priv->display.update_wm = sandybridge_update_wm;
@@ -3876,7 +3925,6 @@ void intel_init_pm(struct drm_device *dev)
3876 dev_priv->display.update_wm = NULL; 3925 dev_priv->display.update_wm = NULL;
3877 } 3926 }
3878 dev_priv->display.init_clock_gating = haswell_init_clock_gating; 3927 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
3879 dev_priv->display.sanitize_pm = gen6_sanitize_pm;
3880 } else 3928 } else
3881 dev_priv->display.update_wm = NULL; 3929 dev_priv->display.update_wm = NULL;
3882 } else if (IS_VALLEYVIEW(dev)) { 3930 } else if (IS_VALLEYVIEW(dev)) {
@@ -3955,14 +4003,16 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
3955 else 4003 else
3956 forcewake_ack = FORCEWAKE_ACK; 4004 forcewake_ack = FORCEWAKE_ACK;
3957 4005
3958 if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) 4006 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
3959 DRM_ERROR("Force wake wait timed out\n"); 4007 FORCEWAKE_ACK_TIMEOUT_MS))
4008 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
3960 4009
3961 I915_WRITE_NOTRACE(FORCEWAKE, 1); 4010 I915_WRITE_NOTRACE(FORCEWAKE, 1);
3962 POSTING_READ(FORCEWAKE); 4011 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
3963 4012
3964 if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) 4013 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
3965 DRM_ERROR("Force wake wait timed out\n"); 4014 FORCEWAKE_ACK_TIMEOUT_MS))
4015 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
3966 4016
3967 __gen6_gt_wait_for_thread_c0(dev_priv); 4017 __gen6_gt_wait_for_thread_c0(dev_priv);
3968} 4018}
@@ -3976,14 +4026,16 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
3976 else 4026 else
3977 forcewake_ack = FORCEWAKE_MT_ACK; 4027 forcewake_ack = FORCEWAKE_MT_ACK;
3978 4028
3979 if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500)) 4029 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
3980 DRM_ERROR("Force wake wait timed out\n"); 4030 FORCEWAKE_ACK_TIMEOUT_MS))
4031 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
3981 4032
3982 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); 4033 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
3983 POSTING_READ(FORCEWAKE_MT); 4034 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
3984 4035
3985 if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) 4036 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
3986 DRM_ERROR("Force wake wait timed out\n"); 4037 FORCEWAKE_ACK_TIMEOUT_MS))
4038 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
3987 4039
3988 __gen6_gt_wait_for_thread_c0(dev_priv); 4040 __gen6_gt_wait_for_thread_c0(dev_priv);
3989} 4041}
@@ -4016,14 +4068,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
4016static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) 4068static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
4017{ 4069{
4018 I915_WRITE_NOTRACE(FORCEWAKE, 0); 4070 I915_WRITE_NOTRACE(FORCEWAKE, 0);
4019 POSTING_READ(FORCEWAKE); 4071 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
4020 gen6_gt_check_fifodbg(dev_priv); 4072 gen6_gt_check_fifodbg(dev_priv);
4021} 4073}
4022 4074
4023static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) 4075static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
4024{ 4076{
4025 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); 4077 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
4026 POSTING_READ(FORCEWAKE_MT); 4078 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
4027 gen6_gt_check_fifodbg(dev_priv); 4079 gen6_gt_check_fifodbg(dev_priv);
4028} 4080}
4029 4081
@@ -4062,24 +4114,24 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
4062 4114
4063static void vlv_force_wake_get(struct drm_i915_private *dev_priv) 4115static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
4064{ 4116{
4065 /* Already awake? */ 4117 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
4066 if ((I915_READ(0x130094) & 0xa1) == 0xa1) 4118 FORCEWAKE_ACK_TIMEOUT_MS))
4067 return; 4119 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4068 4120
4069 I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff); 4121 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
4070 POSTING_READ(FORCEWAKE_VLV);
4071 4122
4072 if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500)) 4123 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
4073 DRM_ERROR("Force wake wait timed out\n"); 4124 FORCEWAKE_ACK_TIMEOUT_MS))
4125 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
4074 4126
4075 __gen6_gt_wait_for_thread_c0(dev_priv); 4127 __gen6_gt_wait_for_thread_c0(dev_priv);
4076} 4128}
4077 4129
4078static void vlv_force_wake_put(struct drm_i915_private *dev_priv) 4130static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
4079{ 4131{
4080 I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000); 4132 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
4081 /* FIXME: confirm VLV behavior with Punit folks */ 4133 /* The below doubles as a POSTING_READ */
4082 POSTING_READ(FORCEWAKE_VLV); 4134 gen6_gt_check_fifodbg(dev_priv);
4083} 4135}
4084 4136
4085void intel_gt_init(struct drm_device *dev) 4137void intel_gt_init(struct drm_device *dev)
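The Valleyview path stops writing raw all-ones/all-zero values and adopts the masked-bit convention used by the other forcewake registers: the high 16 bits of the write select which low bits may change. A sketch of the arithmetic (these definitions mirror the i915 _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE macros, reproduced here for illustration):

#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(a)  ((uint32_t)(((a) << 16) | (a)))
#define MASKED_BIT_DISABLE(a) ((uint32_t)((a) << 16))

int main(void)
{
        /* bit 0 of FORCEWAKE_VLV, enabled then disabled */
        printf("enable:  %08x\n", (unsigned)MASKED_BIT_ENABLE(1));   /* 00010001 */
        printf("disable: %08x\n", (unsigned)MASKED_BIT_DISABLE(1));  /* 00010000 */
        return 0;
}

Writing the mask bit alongside the value lets the disable path clear bit 0 without touching the register's other bits, which is why no read-modify-write cycle is needed.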
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1aef516cc6fa..ecbc5c5dbbbc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -261,6 +261,83 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
261 return 0; 261 return 0;
262} 262}
263 263
264static int
265gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
266{
267 int ret;
268
269 ret = intel_ring_begin(ring, 4);
270 if (ret)
271 return ret;
272
273 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
274 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
275 PIPE_CONTROL_STALL_AT_SCOREBOARD);
276 intel_ring_emit(ring, 0);
277 intel_ring_emit(ring, 0);
278 intel_ring_advance(ring);
279
280 return 0;
281}
282
283static int
284gen7_render_ring_flush(struct intel_ring_buffer *ring,
285 u32 invalidate_domains, u32 flush_domains)
286{
287 u32 flags = 0;
288 struct pipe_control *pc = ring->private;
289 u32 scratch_addr = pc->gtt_offset + 128;
290 int ret;
291
292 /*
293 * Ensure that any following seqno writes only happen when the render
294 * cache is indeed flushed.
295 *
296 * Workaround: 4th PIPE_CONTROL command (except the ones with only
297 * read-cache invalidate bits set) must have the CS_STALL bit set. We
298 * don't try to be clever and just set it unconditionally.
299 */
300 flags |= PIPE_CONTROL_CS_STALL;
301
302 /* Just flush everything. Experiments have shown that reducing the
303 * number of bits based on the write domains has little performance
304 * impact.
305 */
306 if (flush_domains) {
307 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
308 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
309 }
310 if (invalidate_domains) {
311 flags |= PIPE_CONTROL_TLB_INVALIDATE;
312 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
313 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
314 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
315 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
316 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
317 /*
318 * TLB invalidate requires a post-sync write.
319 */
320 flags |= PIPE_CONTROL_QW_WRITE;
321
322 /* Workaround: we must issue a pipe_control with CS-stall bit
323 * set before a pipe_control command that has the state cache
324 * invalidate bit set. */
325 gen7_render_ring_cs_stall_wa(ring);
326 }
327
328 ret = intel_ring_begin(ring, 4);
329 if (ret)
330 return ret;
331
332 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
333 intel_ring_emit(ring, flags);
334 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
335 intel_ring_emit(ring, 0);
336 intel_ring_advance(ring);
337
338 return 0;
339}
340
264static void ring_write_tail(struct intel_ring_buffer *ring, 341static void ring_write_tail(struct intel_ring_buffer *ring,
265 u32 value) 342 u32 value)
266{ 343{
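gen7_render_ring_flush() always sets CS_STALL and, when invalidating, first emits a separate stall-only PIPE_CONTROL (the documented workaround) before the one carrying the invalidate bits and the mandatory post-sync write. A condensed sketch of that ordering; the flag values are hypothetical, only the sequencing matters:

#include <stdint.h>
#include <stdio.h>

enum {
        CS_STALL            = 1 << 0,
        STALL_AT_SCOREBOARD = 1 << 1,
        TLB_INVALIDATE      = 1 << 2,
        QW_WRITE            = 1 << 3,   /* post-sync write */
};

static void emit_pipe_control(uint32_t flags)
{
        printf("PIPE_CONTROL flags=%02x\n", (unsigned)flags);  /* ring emit */
}

static void render_flush(int invalidate)
{
        uint32_t flags = CS_STALL;      /* workaround: set unconditionally */

        if (invalidate) {
                /* TLB invalidation requires a post-sync write */
                flags |= TLB_INVALIDATE | QW_WRITE;
                /* workaround: a CS-stall PIPE_CONTROL must precede any
                 * PIPE_CONTROL with the state cache invalidate bit set */
                emit_pipe_control(CS_STALL | STALL_AT_SCOREBOARD);
        }
        emit_pipe_control(flags);
}

int main(void) { render_flush(1); return 0; }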
@@ -381,12 +458,12 @@ init_pipe_control(struct intel_ring_buffer *ring)
381 458
382 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 459 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
383 460
384 ret = i915_gem_object_pin(obj, 4096, true); 461 ret = i915_gem_object_pin(obj, 4096, true, false);
385 if (ret) 462 if (ret)
386 goto err_unref; 463 goto err_unref;
387 464
388 pc->gtt_offset = obj->gtt_offset; 465 pc->gtt_offset = obj->gtt_offset;
389 pc->cpu_page = kmap(obj->pages[0]); 466 pc->cpu_page = kmap(sg_page(obj->pages->sgl));
390 if (pc->cpu_page == NULL) 467 if (pc->cpu_page == NULL)
391 goto err_unpin; 468 goto err_unpin;
392 469
@@ -413,7 +490,8 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
413 return; 490 return;
414 491
415 obj = pc->obj; 492 obj = pc->obj;
416 kunmap(obj->pages[0]); 493
494 kunmap(sg_page(obj->pages->sgl));
417 i915_gem_object_unpin(obj); 495 i915_gem_object_unpin(obj);
418 drm_gem_object_unreference(&obj->base); 496 drm_gem_object_unreference(&obj->base);
419 497
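The kmap changes in these ringbuffer hunks track the GEM backing-store switch from a plain page array (obj->pages[0]) to a scatterlist table, where the first backing page is reached through sg_page() on the table's first entry. A kernel-context sketch (assumes the standard scatterlist/highmem headers, not standalone):

#include <linux/highmem.h>
#include <linux/scatterlist.h>

/* before: void *va = kmap(obj->pages[0]);
 * after:  the pages live in an sg_table, so take the first sg entry */
static void *map_first_page(struct sg_table *pages)
{
        return kmap(sg_page(pages->sgl));       /* pair with kunmap() */
}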
@@ -461,7 +539,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
461 if (INTEL_INFO(dev)->gen >= 6) 539 if (INTEL_INFO(dev)->gen >= 6)
462 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 540 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
463 541
464 if (IS_IVYBRIDGE(dev)) 542 if (HAS_L3_GPU_CACHE(dev))
465 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); 543 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
466 544
467 return ret; 545 return ret;
@@ -627,26 +705,24 @@ pc_render_add_request(struct intel_ring_buffer *ring,
627} 705}
628 706
629static u32 707static u32
630gen6_ring_get_seqno(struct intel_ring_buffer *ring) 708gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
631{ 709{
632 struct drm_device *dev = ring->dev;
633
634 /* Workaround to force correct ordering between irq and seqno writes on 710 /* Workaround to force correct ordering between irq and seqno writes on
635 * ivb (and maybe also on snb) by reading from a CS register (like 711 * ivb (and maybe also on snb) by reading from a CS register (like
636 * ACTHD) before reading the status page. */ 712 * ACTHD) before reading the status page. */
637 if (IS_GEN6(dev) || IS_GEN7(dev)) 713 if (!lazy_coherency)
638 intel_ring_get_active_head(ring); 714 intel_ring_get_active_head(ring);
639 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 715 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
640} 716}
641 717
642static u32 718static u32
643ring_get_seqno(struct intel_ring_buffer *ring) 719ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
644{ 720{
645 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 721 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
646} 722}
647 723
648static u32 724static u32
649pc_render_get_seqno(struct intel_ring_buffer *ring) 725pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
650{ 726{
651 struct pipe_control *pc = ring->private; 727 struct pipe_control *pc = ring->private;
652 return pc->cpu_page[0]; 728 return pc->cpu_page[0];
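The get_seqno() hook gains a lazy_coherency flag: callers that can tolerate a slightly stale (but still monotonic) value skip the coherence kick, which on snb/ivb is an ACTHD-style register read forced in before the status-page read. A sketch of the two-mode read:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t status_page_seqno = 41;

static void read_acthd(void)
{
        /* stand-in for the CS register read that forces ordering between
         * the interrupt and the seqno write landing in the status page */
        status_page_seqno = 42;
}

static uint32_t get_seqno(bool lazy_coherency)
{
        if (!lazy_coherency)
                read_acthd();           /* pay for a coherent, current value */
        return status_page_seqno;       /* last seen value is often enough */
}

int main(void)
{
        uint32_t lazy = get_seqno(true);
        uint32_t strict = get_seqno(false);

        printf("lazy=%u strict=%u\n", (unsigned)lazy, (unsigned)strict);
        return 0;
}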
@@ -851,7 +927,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
851 927
852 spin_lock_irqsave(&dev_priv->irq_lock, flags); 928 spin_lock_irqsave(&dev_priv->irq_lock, flags);
853 if (ring->irq_refcount++ == 0) { 929 if (ring->irq_refcount++ == 0) {
854 if (IS_IVYBRIDGE(dev) && ring->id == RCS) 930 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
855 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | 931 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
856 GEN6_RENDER_L3_PARITY_ERROR)); 932 GEN6_RENDER_L3_PARITY_ERROR));
857 else 933 else
@@ -874,7 +950,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
874 950
875 spin_lock_irqsave(&dev_priv->irq_lock, flags); 951 spin_lock_irqsave(&dev_priv->irq_lock, flags);
876 if (--ring->irq_refcount == 0) { 952 if (--ring->irq_refcount == 0) {
877 if (IS_IVYBRIDGE(dev) && ring->id == RCS) 953 if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
878 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR); 954 I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
879 else 955 else
880 I915_WRITE_IMR(ring, ~0); 956 I915_WRITE_IMR(ring, ~0);
@@ -950,7 +1026,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
950 if (obj == NULL) 1026 if (obj == NULL)
951 return; 1027 return;
952 1028
953 kunmap(obj->pages[0]); 1029 kunmap(sg_page(obj->pages->sgl));
954 i915_gem_object_unpin(obj); 1030 i915_gem_object_unpin(obj);
955 drm_gem_object_unreference(&obj->base); 1031 drm_gem_object_unreference(&obj->base);
956 ring->status_page.obj = NULL; 1032 ring->status_page.obj = NULL;
@@ -971,13 +1047,13 @@ static int init_status_page(struct intel_ring_buffer *ring)
971 1047
972 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 1048 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
973 1049
974 ret = i915_gem_object_pin(obj, 4096, true); 1050 ret = i915_gem_object_pin(obj, 4096, true, false);
975 if (ret != 0) { 1051 if (ret != 0) {
976 goto err_unref; 1052 goto err_unref;
977 } 1053 }
978 1054
979 ring->status_page.gfx_addr = obj->gtt_offset; 1055 ring->status_page.gfx_addr = obj->gtt_offset;
980 ring->status_page.page_addr = kmap(obj->pages[0]); 1056 ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
981 if (ring->status_page.page_addr == NULL) { 1057 if (ring->status_page.page_addr == NULL) {
982 ret = -ENOMEM; 1058 ret = -ENOMEM;
983 goto err_unpin; 1059 goto err_unpin;
@@ -1009,7 +1085,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1009 ring->dev = dev; 1085 ring->dev = dev;
1010 INIT_LIST_HEAD(&ring->active_list); 1086 INIT_LIST_HEAD(&ring->active_list);
1011 INIT_LIST_HEAD(&ring->request_list); 1087 INIT_LIST_HEAD(&ring->request_list);
1012 INIT_LIST_HEAD(&ring->gpu_write_list);
1013 ring->size = 32 * PAGE_SIZE; 1088 ring->size = 32 * PAGE_SIZE;
1014 1089
1015 init_waitqueue_head(&ring->irq_queue); 1090 init_waitqueue_head(&ring->irq_queue);
@@ -1029,7 +1104,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1029 1104
1030 ring->obj = obj; 1105 ring->obj = obj;
1031 1106
1032 ret = i915_gem_object_pin(obj, PAGE_SIZE, true); 1107 ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
1033 if (ret) 1108 if (ret)
1034 goto err_unref; 1109 goto err_unref;
1035 1110
@@ -1378,7 +1453,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1378 1453
1379 if (INTEL_INFO(dev)->gen >= 6) { 1454 if (INTEL_INFO(dev)->gen >= 6) {
1380 ring->add_request = gen6_add_request; 1455 ring->add_request = gen6_add_request;
1381 ring->flush = gen6_render_ring_flush; 1456 ring->flush = gen7_render_ring_flush;
1457 if (INTEL_INFO(dev)->gen == 6)
1458 ring->flush = gen6_render_ring_flush;
1382 ring->irq_get = gen6_ring_get_irq; 1459 ring->irq_get = gen6_ring_get_irq;
1383 ring->irq_put = gen6_ring_put_irq; 1460 ring->irq_put = gen6_ring_put_irq;
1384 ring->irq_enable_mask = GT_USER_INTERRUPT; 1461 ring->irq_enable_mask = GT_USER_INTERRUPT;
@@ -1480,7 +1557,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1480 ring->dev = dev; 1557 ring->dev = dev;
1481 INIT_LIST_HEAD(&ring->active_list); 1558 INIT_LIST_HEAD(&ring->active_list);
1482 INIT_LIST_HEAD(&ring->request_list); 1559 INIT_LIST_HEAD(&ring->request_list);
1483 INIT_LIST_HEAD(&ring->gpu_write_list);
1484 1560
1485 ring->size = size; 1561 ring->size = size;
1486 ring->effective_size = ring->size; 1562 ring->effective_size = ring->size;
@@ -1573,3 +1649,41 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
1573 1649
1574 return intel_init_ring_buffer(dev, ring); 1650 return intel_init_ring_buffer(dev, ring);
1575} 1651}
1652
1653int
1654intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
1655{
1656 int ret;
1657
1658 if (!ring->gpu_caches_dirty)
1659 return 0;
1660
1661 ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
1662 if (ret)
1663 return ret;
1664
1665 trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
1666
1667 ring->gpu_caches_dirty = false;
1668 return 0;
1669}
1670
1671int
1672intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
1673{
1674 uint32_t flush_domains;
1675 int ret;
1676
1677 flush_domains = 0;
1678 if (ring->gpu_caches_dirty)
1679 flush_domains = I915_GEM_GPU_DOMAINS;
1680
1681 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
1682 if (ret)
1683 return ret;
1684
1685 trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
1686
1687 ring->gpu_caches_dirty = false;
1688 return 0;
1689}
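The two helpers just added centralise ring cache maintenance around the new gpu_caches_dirty flag: flushing is skipped entirely when nothing is dirty, and invalidation piggybacks a flush when something is. A hedged sketch of the intended call pattern around a batch submission (inferred from the names and the flag, not spelled out in this hunk):

	/* Illustrative caller; 'ring' and 'ret' come from the surrounding
	 * submission path. */
	ret = intel_ring_invalidate_all_caches(ring);	/* before the GPU reads new state */
	if (ret)
		return ret;

	/* ... emit and dispatch the batchbuffer ... */

	ring->gpu_caches_dirty = true;			/* GPU may now hold dirty data */
	ret = intel_ring_flush_all_caches(ring);	/* no-op if nothing was dirty */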
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 1d3c81fdad92..2ea7a311a1f0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -72,7 +72,14 @@ struct intel_ring_buffer {
72 u32 flush_domains); 72 u32 flush_domains);
73 int (*add_request)(struct intel_ring_buffer *ring, 73 int (*add_request)(struct intel_ring_buffer *ring,
74 u32 *seqno); 74 u32 *seqno);
75 u32 (*get_seqno)(struct intel_ring_buffer *ring); 75 /* Some chipsets are not quite as coherent as advertised and need
76 * an expensive kick to force a true read of the up-to-date seqno.
77 * However, the up-to-date seqno is not always required and the last
78 * seen value is good enough. Note that the seqno will always be
79 * monotonic, even if not coherent.
80 */
81 u32 (*get_seqno)(struct intel_ring_buffer *ring,
82 bool lazy_coherency);
76 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, 83 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
77 u32 offset, u32 length); 84 u32 offset, u32 length);
78 void (*cleanup)(struct intel_ring_buffer *ring); 85 void (*cleanup)(struct intel_ring_buffer *ring);
@@ -101,15 +108,6 @@ struct intel_ring_buffer {
101 struct list_head request_list; 108 struct list_head request_list;
102 109
103 /** 110 /**
104 * List of objects currently pending a GPU write flush.
105 *
106 * All elements on this list will belong to either the
107 * active_list or flushing_list, last_rendering_seqno can
108 * be used to differentiate between the two elements.
109 */
110 struct list_head gpu_write_list;
111
112 /**
113 * Do we have some not yet emitted requests outstanding? 111 * Do we have some not yet emitted requests outstanding?
114 */ 112 */
115 u32 outstanding_lazy_request; 113 u32 outstanding_lazy_request;
@@ -204,6 +202,8 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
204void intel_ring_advance(struct intel_ring_buffer *ring); 202void intel_ring_advance(struct intel_ring_buffer *ring);
205 203
206u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); 204u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
205int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
206int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
207 207
208int intel_init_render_ring_buffer(struct drm_device *dev); 208int intel_init_render_ring_buffer(struct drm_device *dev);
209int intel_init_bsd_ring_buffer(struct drm_device *dev); 209int intel_init_bsd_ring_buffer(struct drm_device *dev);
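The lazy_coherency flag documented in the header above trades accuracy for cost: callers take a cheap, possibly stale read first and pay for the coherent read only when the cheap one is inconclusive. A sketch of how a caller might layer the two, assuming i915_seqno_passed() as the driver's wrap-safe seqno comparison; the polling structure is illustrative, not the actual wait code:

static bool seqno_complete_sketch(struct intel_ring_buffer *ring, u32 seqno)
{
	/* Fast path: the last-seen value is monotonic, so if it already
	 * passed the target, the true value has as well. */
	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return true;

	/* Slow path: force the expensive coherent read before concluding
	 * that the request is still outstanding. */
	return i915_seqno_passed(ring->get_seqno(ring, false), seqno);
}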
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d251d9d7a06c..0007a4d9bf6e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -96,7 +96,7 @@ struct intel_sdvo {
96 /* 96 /*
97 * Hotplug activation bits for this device 97 * Hotplug activation bits for this device
98 */ 98 */
99 uint8_t hotplug_active[2]; 99 uint16_t hotplug_active;
100 100
101 /** 101 /**
102 * This is used to select the color range of RGB outputs in HDMI mode. 102 * This is used to select the color range of RGB outputs in HDMI mode.
@@ -627,6 +627,14 @@ static bool intel_sdvo_set_active_outputs(struct intel_sdvo *intel_sdvo,
627 &outputs, sizeof(outputs)); 627 &outputs, sizeof(outputs));
628} 628}
629 629
630static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
631 u16 *outputs)
632{
633 return intel_sdvo_get_value(intel_sdvo,
634 SDVO_CMD_GET_ACTIVE_OUTPUTS,
635 outputs, sizeof(*outputs));
636}
637
630static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo, 638static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
631 int mode) 639 int mode)
632{ 640{
@@ -1141,51 +1149,132 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1141 intel_sdvo_write_sdvox(intel_sdvo, sdvox); 1149 intel_sdvo_write_sdvox(intel_sdvo, sdvox);
1142} 1150}
1143 1151
1144static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) 1152static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
1145{ 1153{
1146 struct drm_device *dev = encoder->dev; 1154 struct intel_sdvo_connector *intel_sdvo_connector =
1155 to_intel_sdvo_connector(&connector->base);
1156 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
1157 u16 active_outputs;
1158
1159 intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
1160
1161 if (active_outputs & intel_sdvo_connector->output_flag)
1162 return true;
1163 else
1164 return false;
1165}
1166
1167static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
1168 enum pipe *pipe)
1169{
1170 struct drm_device *dev = encoder->base.dev;
1147 struct drm_i915_private *dev_priv = dev->dev_private; 1171 struct drm_i915_private *dev_priv = dev->dev_private;
1148 struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); 1172 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1149 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 1173 u32 tmp;
1174
1175 tmp = I915_READ(intel_sdvo->sdvo_reg);
1176
1177 if (!(tmp & SDVO_ENABLE))
1178 return false;
1179
1180 if (HAS_PCH_CPT(dev))
1181 *pipe = PORT_TO_PIPE_CPT(tmp);
1182 else
1183 *pipe = PORT_TO_PIPE(tmp);
1184
1185 return true;
1186}
1187
1188static void intel_disable_sdvo(struct intel_encoder *encoder)
1189{
1190 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1191 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1192 u32 temp;
1193
1194 intel_sdvo_set_active_outputs(intel_sdvo, 0);
1195 if (0)
1196 intel_sdvo_set_encoder_power_state(intel_sdvo,
1197 DRM_MODE_DPMS_OFF);
1198
1199 temp = I915_READ(intel_sdvo->sdvo_reg);
1200 if ((temp & SDVO_ENABLE) != 0) {
1201 intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
1202 }
1203}
1204
1205static void intel_enable_sdvo(struct intel_encoder *encoder)
1206{
1207 struct drm_device *dev = encoder->base.dev;
1208 struct drm_i915_private *dev_priv = dev->dev_private;
1209 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1210 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1150 u32 temp; 1211 u32 temp;
1212 bool input1, input2;
1213 int i;
1214 u8 status;
1215
1216 temp = I915_READ(intel_sdvo->sdvo_reg);
1217 if ((temp & SDVO_ENABLE) == 0)
1218 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
1219 for (i = 0; i < 2; i++)
1220 intel_wait_for_vblank(dev, intel_crtc->pipe);
1221
1222 status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
1223 /* Warn if the device reported failure to sync.
1224 * A lot of SDVO devices fail to notify of sync, but it's
1225 * a given that, if the status is a success, we succeeded.
1226 */
1227 if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
1228 DRM_DEBUG_KMS("First %s output reported failure to "
1229 "sync\n", SDVO_NAME(intel_sdvo));
1230 }
1231
1232 if (0)
1233 intel_sdvo_set_encoder_power_state(intel_sdvo,
1234 DRM_MODE_DPMS_ON);
1235 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
1236}
1237
1238static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
1239{
1240 struct drm_crtc *crtc;
1241 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1242
1243 /* SDVO supports only two DPMS states. */
1244 if (mode != DRM_MODE_DPMS_ON)
1245 mode = DRM_MODE_DPMS_OFF;
1246
1247 if (mode == connector->dpms)
1248 return;
1249
1250 connector->dpms = mode;
1251
1252 /* Only need to change hw state when actually enabled */
1253 crtc = intel_sdvo->base.base.crtc;
1254 if (!crtc) {
1255 intel_sdvo->base.connectors_active = false;
1256 return;
1257 }
1151 1258
1152 if (mode != DRM_MODE_DPMS_ON) { 1259 if (mode != DRM_MODE_DPMS_ON) {
1153 intel_sdvo_set_active_outputs(intel_sdvo, 0); 1260 intel_sdvo_set_active_outputs(intel_sdvo, 0);
1154 if (0) 1261 if (0)
1155 intel_sdvo_set_encoder_power_state(intel_sdvo, mode); 1262 intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
1156 1263
1157 if (mode == DRM_MODE_DPMS_OFF) { 1264 intel_sdvo->base.connectors_active = false;
1158 temp = I915_READ(intel_sdvo->sdvo_reg); 1265
1159 if ((temp & SDVO_ENABLE) != 0) { 1266 intel_crtc_update_dpms(crtc);
1160 intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
1161 }
1162 }
1163 } else { 1267 } else {
1164 bool input1, input2; 1268 intel_sdvo->base.connectors_active = true;
1165 int i; 1269
1166 u8 status; 1270 intel_crtc_update_dpms(crtc);
1167
1168 temp = I915_READ(intel_sdvo->sdvo_reg);
1169 if ((temp & SDVO_ENABLE) == 0)
1170 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
1171 for (i = 0; i < 2; i++)
1172 intel_wait_for_vblank(dev, intel_crtc->pipe);
1173
1174 status = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2);
1175 /* Warn if the device reported failure to sync.
1176 * A lot of SDVO devices fail to notify of sync, but it's
1177 * a given that, if the status is a success, we succeeded.
1178 */
1179 if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
1180 DRM_DEBUG_KMS("First %s output reported failure to "
1181 "sync\n", SDVO_NAME(intel_sdvo));
1182 }
1183 1271
1184 if (0) 1272 if (0)
1185 intel_sdvo_set_encoder_power_state(intel_sdvo, mode); 1273 intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
1186 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output); 1274 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
1187 } 1275 }
1188 return; 1276
1277 intel_modeset_check_state(connector->dev);
1189} 1278}
1190 1279
1191static int intel_sdvo_mode_valid(struct drm_connector *connector, 1280static int intel_sdvo_mode_valid(struct drm_connector *connector,
@@ -1250,25 +1339,29 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
1250 return true; 1339 return true;
1251} 1340}
1252 1341
1253static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) 1342static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
1254{ 1343{
1255 struct drm_device *dev = intel_sdvo->base.base.dev; 1344 struct drm_device *dev = intel_sdvo->base.base.dev;
1256 u8 response[2]; 1345 uint16_t hotplug;
1257 1346
1258 /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise 1347 /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
1259 * on the line. */ 1348 * on the line. */
1260 if (IS_I945G(dev) || IS_I945GM(dev)) 1349 if (IS_I945G(dev) || IS_I945GM(dev))
1261 return false; 1350 return 0;
1262 1351
1263 return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, 1352 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
1264 &response, 2) && response[0]; 1353 &hotplug, sizeof(hotplug)))
1354 return 0;
1355
1356 return hotplug;
1265} 1357}
1266 1358
1267static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) 1359static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
1268{ 1360{
1269 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); 1361 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1270 1362
1271 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); 1363 intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
1364 &intel_sdvo->hotplug_active, 2);
1272} 1365}
1273 1366
1274static bool 1367static bool
@@ -1344,7 +1437,6 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
1344 } 1437 }
1345 } else 1438 } else
1346 status = connector_status_disconnected; 1439 status = connector_status_disconnected;
1347 connector->display_info.raw_edid = NULL;
1348 kfree(edid); 1440 kfree(edid);
1349 } 1441 }
1350 1442
@@ -1418,7 +1510,6 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1418 else 1510 else
1419 ret = connector_status_disconnected; 1511 ret = connector_status_disconnected;
1420 1512
1421 connector->display_info.raw_edid = NULL;
1422 kfree(edid); 1513 kfree(edid);
1423 } else 1514 } else
1424 ret = connector_status_connected; 1515 ret = connector_status_connected;
@@ -1464,7 +1555,6 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1464 drm_add_edid_modes(connector, edid); 1555 drm_add_edid_modes(connector, edid);
1465 } 1556 }
1466 1557
1467 connector->display_info.raw_edid = NULL;
1468 kfree(edid); 1558 kfree(edid);
1469 } 1559 }
1470} 1560}
@@ -1836,8 +1926,8 @@ set_value:
1836done: 1926done:
1837 if (intel_sdvo->base.base.crtc) { 1927 if (intel_sdvo->base.base.crtc) {
1838 struct drm_crtc *crtc = intel_sdvo->base.base.crtc; 1928 struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
1839 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, 1929 intel_set_mode(crtc, &crtc->mode,
1840 crtc->y, crtc->fb); 1930 crtc->x, crtc->y, crtc->fb);
1841 } 1931 }
1842 1932
1843 return 0; 1933 return 0;
@@ -1845,15 +1935,13 @@ done:
1845} 1935}
1846 1936
1847static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { 1937static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
1848 .dpms = intel_sdvo_dpms,
1849 .mode_fixup = intel_sdvo_mode_fixup, 1938 .mode_fixup = intel_sdvo_mode_fixup,
1850 .prepare = intel_encoder_prepare,
1851 .mode_set = intel_sdvo_mode_set, 1939 .mode_set = intel_sdvo_mode_set,
1852 .commit = intel_encoder_commit, 1940 .disable = intel_encoder_noop,
1853}; 1941};
1854 1942
1855static const struct drm_connector_funcs intel_sdvo_connector_funcs = { 1943static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
1856 .dpms = drm_helper_connector_dpms, 1944 .dpms = intel_sdvo_dpms,
1857 .detect = intel_sdvo_detect, 1945 .detect = intel_sdvo_detect,
1858 .fill_modes = drm_helper_probe_single_connector_modes, 1946 .fill_modes = drm_helper_probe_single_connector_modes,
1859 .set_property = intel_sdvo_set_property, 1947 .set_property = intel_sdvo_set_property,
@@ -2025,6 +2113,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
2025 connector->base.base.interlace_allowed = 1; 2113 connector->base.base.interlace_allowed = 1;
2026 connector->base.base.doublescan_allowed = 0; 2114 connector->base.base.doublescan_allowed = 0;
2027 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; 2115 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
2116 connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
2028 2117
2029 intel_connector_attach_encoder(&connector->base, &encoder->base); 2118 intel_connector_attach_encoder(&connector->base, &encoder->base);
2030 drm_sysfs_connector_add(&connector->base.base); 2119 drm_sysfs_connector_add(&connector->base.base);
@@ -2063,17 +2152,18 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2063 2152
2064 intel_connector = &intel_sdvo_connector->base; 2153 intel_connector = &intel_sdvo_connector->base;
2065 connector = &intel_connector->base; 2154 connector = &intel_connector->base;
2066 if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { 2155 if (intel_sdvo_get_hotplug_support(intel_sdvo) &
2156 intel_sdvo_connector->output_flag) {
2067 connector->polled = DRM_CONNECTOR_POLL_HPD; 2157 connector->polled = DRM_CONNECTOR_POLL_HPD;
2068 intel_sdvo->hotplug_active[0] |= 1 << device; 2158 intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
2069 /* Some SDVO devices have one-shot hotplug interrupts. 2159 /* Some SDVO devices have one-shot hotplug interrupts.
2070 * Ensure that they get re-enabled when an interrupt happens. 2160 * Ensure that they get re-enabled when an interrupt happens.
2071 */ 2161 */
2072 intel_encoder->hot_plug = intel_sdvo_enable_hotplug; 2162 intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
2073 intel_sdvo_enable_hotplug(intel_encoder); 2163 intel_sdvo_enable_hotplug(intel_encoder);
2074 } 2164 } else {
2075 else
2076 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; 2165 connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
2166 }
2077 encoder->encoder_type = DRM_MODE_ENCODER_TMDS; 2167 encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
2078 connector->connector_type = DRM_MODE_CONNECTOR_DVID; 2168 connector->connector_type = DRM_MODE_CONNECTOR_DVID;
2079 2169
@@ -2081,8 +2171,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2081 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2171 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
2082 intel_sdvo->is_hdmi = true; 2172 intel_sdvo->is_hdmi = true;
2083 } 2173 }
2084 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2174 intel_sdvo->base.cloneable = true;
2085 (1 << INTEL_ANALOG_CLONE_BIT));
2086 2175
2087 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2176 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2088 if (intel_sdvo->is_hdmi) 2177 if (intel_sdvo->is_hdmi)
@@ -2113,7 +2202,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2113 2202
2114 intel_sdvo->is_tv = true; 2203 intel_sdvo->is_tv = true;
2115 intel_sdvo->base.needs_tv_clock = true; 2204 intel_sdvo->base.needs_tv_clock = true;
2116 intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; 2205 intel_sdvo->base.cloneable = false;
2117 2206
2118 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2207 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2119 2208
@@ -2156,8 +2245,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
2156 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; 2245 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
2157 } 2246 }
2158 2247
2159 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2248 intel_sdvo->base.cloneable = true;
2160 (1 << INTEL_ANALOG_CLONE_BIT));
2161 2249
2162 intel_sdvo_connector_init(intel_sdvo_connector, 2250 intel_sdvo_connector_init(intel_sdvo_connector,
2163 intel_sdvo); 2251 intel_sdvo);
@@ -2189,8 +2277,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2189 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; 2277 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
2190 } 2278 }
2191 2279
2192 intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) | 2280 /* SDVO LVDS is cloneable because the SDVO encoder does the upscaling,
2193 (1 << INTEL_SDVO_LVDS_CLONE_BIT)); 2281 * as opposed to native LVDS, where we upscale with the panel-fitter
2282 * (and hence only the native LVDS resolution could be cloned). */
2283 intel_sdvo->base.cloneable = true;
2194 2284
2195 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2285 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2196 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) 2286 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
@@ -2575,6 +2665,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2575 2665
2576 drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); 2666 drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
2577 2667
2668 intel_encoder->disable = intel_disable_sdvo;
2669 intel_encoder->enable = intel_enable_sdvo;
2670 intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
2671
2578 /* In default case sdvo lvds is false */ 2672 /* In default case sdvo lvds is false */
2579 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) 2673 if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
2580 goto err; 2674 goto err;
@@ -2589,7 +2683,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2589 /* Only enable the hotplug irq if we need it, to work around noisy 2683 /* Only enable the hotplug irq if we need it, to work around noisy
2590 * hotplug lines. 2684 * hotplug lines.
2591 */ 2685 */
2592 if (intel_sdvo->hotplug_active[0]) 2686 if (intel_sdvo->hotplug_active)
2593 dev_priv->hotplug_supported_mask |= hotplug_mask; 2687 dev_priv->hotplug_supported_mask |= hotplug_mask;
2594 2688
2595 intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); 2689 intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
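The SDVO conversion above illustrates the new hardware-state readback contract used throughout this series: an encoder reports whether it is enabled and on which pipe, while a connector reports whether its specific output is live (needed here because one SDVO encoder can drive several outputs). A hedged sketch of how a state checker presumably consumes the hooks; the checker itself is not part of this hunk, and pipe_name() is assumed to be the driver's pipe-to-letter helper:

	enum pipe pipe;

	if (encoder->get_hw_state(encoder, &pipe))
		DRM_DEBUG_KMS("encoder active on pipe %c\n", pipe_name(pipe));

	if (connector->get_hw_state(connector))
		DRM_DEBUG_KMS("connector has an active output\n");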
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ccfb2ff4c31d..62bb048c135e 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -835,22 +835,37 @@ static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
835 base); 835 base);
836} 836}
837 837
838static bool
839intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
840{
841 struct drm_device *dev = encoder->base.dev;
842 struct drm_i915_private *dev_priv = dev->dev_private;
843 u32 tmp = I915_READ(TV_CTL);
844
845 if (!(tmp & TV_ENC_ENABLE))
846 return false;
847
848 *pipe = PORT_TO_PIPE(tmp);
849
850 return true;
851}
852
838static void 853static void
839intel_tv_dpms(struct drm_encoder *encoder, int mode) 854intel_enable_tv(struct intel_encoder *encoder)
840{ 855{
841 struct drm_device *dev = encoder->dev; 856 struct drm_device *dev = encoder->base.dev;
842 struct drm_i915_private *dev_priv = dev->dev_private; 857 struct drm_i915_private *dev_priv = dev->dev_private;
843 858
844 switch (mode) { 859 I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
845 case DRM_MODE_DPMS_ON: 860}
846 I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); 861
847 break; 862static void
848 case DRM_MODE_DPMS_STANDBY: 863intel_disable_tv(struct intel_encoder *encoder)
849 case DRM_MODE_DPMS_SUSPEND: 864{
850 case DRM_MODE_DPMS_OFF: 865 struct drm_device *dev = encoder->base.dev;
851 I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); 866 struct drm_i915_private *dev_priv = dev->dev_private;
852 break; 867
853 } 868 I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
854} 869}
855 870
856static const struct tv_mode * 871static const struct tv_mode *
@@ -894,17 +909,14 @@ intel_tv_mode_fixup(struct drm_encoder *encoder,
894 const struct drm_display_mode *mode, 909 const struct drm_display_mode *mode,
895 struct drm_display_mode *adjusted_mode) 910 struct drm_display_mode *adjusted_mode)
896{ 911{
897 struct drm_device *dev = encoder->dev;
898 struct intel_tv *intel_tv = enc_to_intel_tv(encoder); 912 struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
899 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); 913 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
900 struct intel_encoder *other_encoder;
901 914
902 if (!tv_mode) 915 if (!tv_mode)
903 return false; 916 return false;
904 917
905 for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder) 918 if (intel_encoder_check_is_cloned(&intel_tv->base))
906 if (&other_encoder->base != encoder) 919 return false;
907 return false;
908 920
909 adjusted_mode->clock = tv_mode->clock; 921 adjusted_mode->clock = tv_mode->clock;
910 return true; 922 return true;
@@ -1302,12 +1314,9 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1302 if (force) { 1314 if (force) {
1303 struct intel_load_detect_pipe tmp; 1315 struct intel_load_detect_pipe tmp;
1304 1316
1305 if (intel_get_load_detect_pipe(&intel_tv->base, connector, 1317 if (intel_get_load_detect_pipe(connector, &mode, &tmp)) {
1306 &mode, &tmp)) {
1307 type = intel_tv_detect_type(intel_tv, connector); 1318 type = intel_tv_detect_type(intel_tv, connector);
1308 intel_release_load_detect_pipe(&intel_tv->base, 1319 intel_release_load_detect_pipe(connector, &tmp);
1309 connector,
1310 &tmp);
1311 } else 1320 } else
1312 return connector_status_unknown; 1321 return connector_status_unknown;
1313 } else 1322 } else
@@ -1473,22 +1482,20 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
1473 } 1482 }
1474 1483
1475 if (changed && crtc) 1484 if (changed && crtc)
1476 drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, 1485 intel_set_mode(crtc, &crtc->mode,
1477 crtc->y, crtc->fb); 1486 crtc->x, crtc->y, crtc->fb);
1478out: 1487out:
1479 return ret; 1488 return ret;
1480} 1489}
1481 1490
1482static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { 1491static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
1483 .dpms = intel_tv_dpms,
1484 .mode_fixup = intel_tv_mode_fixup, 1492 .mode_fixup = intel_tv_mode_fixup,
1485 .prepare = intel_encoder_prepare,
1486 .mode_set = intel_tv_mode_set, 1493 .mode_set = intel_tv_mode_set,
1487 .commit = intel_encoder_commit, 1494 .disable = intel_encoder_noop,
1488}; 1495};
1489 1496
1490static const struct drm_connector_funcs intel_tv_connector_funcs = { 1497static const struct drm_connector_funcs intel_tv_connector_funcs = {
1491 .dpms = drm_helper_connector_dpms, 1498 .dpms = intel_connector_dpms,
1492 .detect = intel_tv_detect, 1499 .detect = intel_tv_detect,
1493 .destroy = intel_tv_destroy, 1500 .destroy = intel_tv_destroy,
1494 .set_property = intel_tv_set_property, 1501 .set_property = intel_tv_set_property,
@@ -1618,10 +1625,15 @@ intel_tv_init(struct drm_device *dev)
1618 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, 1625 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
1619 DRM_MODE_ENCODER_TVDAC); 1626 DRM_MODE_ENCODER_TVDAC);
1620 1627
1628 intel_encoder->enable = intel_enable_tv;
1629 intel_encoder->disable = intel_disable_tv;
1630 intel_encoder->get_hw_state = intel_tv_get_hw_state;
1631 intel_connector->get_hw_state = intel_connector_get_hw_state;
1632
1621 intel_connector_attach_encoder(intel_connector, intel_encoder); 1633 intel_connector_attach_encoder(intel_connector, intel_encoder);
1622 intel_encoder->type = INTEL_OUTPUT_TVOUT; 1634 intel_encoder->type = INTEL_OUTPUT_TVOUT;
1623 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 1635 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
1624 intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT); 1636 intel_encoder->cloneable = false;
1625 intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); 1637 intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
1626 intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT); 1638 intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
1627 intel_tv->type = DRM_MODE_CONNECTOR_Unknown; 1639 intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
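Unlike SDVO, which must ask the encoder chip for its active outputs, TV-out can use the generic intel_connector_get_hw_state. A sketch of what that shared helper plausibly does, namely delegate to the attached encoder's readback (an assumption; the helper is defined elsewhere in the series):

static bool connector_get_hw_state_sketch(struct intel_connector *connector)
{
	enum pipe pipe = 0;
	struct intel_encoder *encoder = intel_attached_encoder(&connector->base);

	/* The connector is live exactly when its encoder is enabled. */
	return encoder->get_hw_state(encoder, &pipe);
}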
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 73868d0c25ae..5ea5033eae0a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -195,7 +195,6 @@ struct mga_device {
195 struct drm_global_reference mem_global_ref; 195 struct drm_global_reference mem_global_ref;
196 struct ttm_bo_global_ref bo_global_ref; 196 struct ttm_bo_global_ref bo_global_ref;
197 struct ttm_bo_device bdev; 197 struct ttm_bo_device bdev;
198 atomic_t validate_sequence;
199 } ttm; 198 } ttm;
200 199
201 u32 reg_1e24; /* SE model number */ 200 u32 reg_1e24; /* SE model number */
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 3d429de0771a..d3d99a28ddef 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1398,7 +1398,6 @@ static int mga_vga_get_modes(struct drm_connector *connector)
1398 if (edid) { 1398 if (edid) {
1399 drm_mode_connector_update_edid_property(connector, edid); 1399 drm_mode_connector_update_edid_property(connector, edid);
1400 ret = drm_add_edid_modes(connector, edid); 1400 ret = drm_add_edid_modes(connector, edid);
1401 connector->display_info.raw_edid = NULL;
1402 kfree(edid); 1401 kfree(edid);
1403 } 1402 }
1404 return ret; 1403 return ret;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 97a81260485a..8a55beeb8bdc 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -17,6 +17,34 @@ config DRM_NOUVEAU
17 help 17 help
18 Choose this option for open-source nVidia support. 18 Choose this option for open-source nVidia support.
19 19
20config NOUVEAU_DEBUG
21 int "Maximum debug level"
22 depends on DRM_NOUVEAU
23 range 0 7
24 default 5
25 help
26 Selects the maximum debug level to compile support for.
27
28 0 - fatal
29 1 - error
30 2 - warning
31 3 - info
32 4 - debug
33 5 - trace (recommended)
34 6 - paranoia
35 7 - spam
36
37 The paranoia and spam levels add a lot of extra checks which
38 may slow down driver operation.
39
40config NOUVEAU_DEBUG_DEFAULT
41 int "Default debug level"
42 depends on DRM_NOUVEAU
43 range 0 7
44 default 3
45 help
46 Selects the debug level that is enabled by default.
47
20config DRM_NOUVEAU_BACKLIGHT 48config DRM_NOUVEAU_BACKLIGHT
21 bool "Support for backlight control" 49 bool "Support for backlight control"
22 depends on DRM_NOUVEAU 50 depends on DRM_NOUVEAU
@@ -25,14 +53,6 @@ config DRM_NOUVEAU_BACKLIGHT
25 Say Y here if you want to control the backlight of your display 53 Say Y here if you want to control the backlight of your display
26 (e.g. a laptop panel). 54 (e.g. a laptop panel).
27 55
28config DRM_NOUVEAU_DEBUG
29 bool "Build in Nouveau's debugfs support"
30 depends on DRM_NOUVEAU && DEBUG_FS
31 default y
32 help
33 Say Y here if you want Nouveau to output debugging information
34 via debugfs.
35
36menu "I2C encoder or helper chips" 56menu "I2C encoder or helper chips"
37 depends on DRM && DRM_KMS_HELPER && I2C 57 depends on DRM && DRM_KMS_HELPER && I2C
38 58
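The split between NOUVEAU_DEBUG (maximum level compiled in) and NOUVEAU_DEBUG_DEFAULT (level active by default) suggests a two-stage filter: the compiler discards messages above the build-time ceiling outright, and the runtime level gates whatever remains. A hypothetical sketch of such a macro; the names and shape are assumptions, not the driver's actual implementation:

/* Hypothetical two-stage filter: compile-time ceiling, runtime gate. */
#define NV_DBG_TRACE_SKETCH 5

#define nv_trace_sketch(obj, fmt, args...) do {                          \
	if (NV_DBG_TRACE_SKETCH <= CONFIG_NOUVEAU_DEBUG &&               \
	    NV_DBG_TRACE_SKETCH <= (obj)->debug)                         \
		printk(KERN_DEBUG "nouveau: " fmt, ##args);              \
} while (0)

With CONFIG_NOUVEAU_DEBUG below 5, the first comparison is constant-false and the compiler can drop the call site entirely, which is what makes the paranoia and spam levels free when they are not compiled in.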
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 1cece6a78f39..a990df4d6c04 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -3,49 +3,190 @@
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ 6ccflags-y += -I$(src)/core/include
7 nouveau_gpuobj.o nouveau_irq.o nouveau_notifier.o \ 7ccflags-y += -I$(src)/core
8 nouveau_sgdma.o nouveau_dma.o nouveau_util.o \ 8ccflags-y += -I$(src)
9 nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ 9
10 nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ 10nouveau-y := core/core/client.o
11 nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ 11nouveau-y += core/core/engctx.o
12 nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \ 12nouveau-y += core/core/engine.o
13 nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \ 13nouveau-y += core/core/enum.o
14 nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \ 14nouveau-y += core/core/gpuobj.o
15 nouveau_abi16.o \ 15nouveau-y += core/core/handle.o
16 nv04_timer.o \ 16nouveau-y += core/core/mm.o
17 nv04_mc.o nv40_mc.o nv50_mc.o \ 17nouveau-y += core/core/namedb.o
18 nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \ 18nouveau-y += core/core/object.o
19 nv50_fb.o nvc0_fb.o \ 19nouveau-y += core/core/option.o
20 nv04_fifo.o nv10_fifo.o nv17_fifo.o nv40_fifo.o nv50_fifo.o \ 20nouveau-y += core/core/parent.o
21 nv84_fifo.o nvc0_fifo.o nve0_fifo.o \ 21nouveau-y += core/core/printk.o
22 nv04_fence.o nv10_fence.o nv84_fence.o nvc0_fence.o \ 22nouveau-y += core/core/ramht.o
23 nv04_software.o nv50_software.o nvc0_software.o \ 23nouveau-y += core/core/subdev.o
24 nv04_graph.o nv10_graph.o nv20_graph.o \ 24
25 nv40_graph.o nv50_graph.o nvc0_graph.o nve0_graph.o \ 25nouveau-y += core/subdev/bar/base.o
26 nv40_grctx.o nv50_grctx.o nvc0_grctx.o nve0_grctx.o \ 26nouveau-y += core/subdev/bar/nv50.o
27 nv84_crypt.o nv98_crypt.o \ 27nouveau-y += core/subdev/bar/nvc0.o
28 nva3_copy.o nvc0_copy.o \ 28nouveau-y += core/subdev/bios/base.o
29 nv31_mpeg.o nv50_mpeg.o \ 29nouveau-y += core/subdev/bios/bit.o
30 nv84_bsp.o \ 30nouveau-y += core/subdev/bios/conn.o
31 nv84_vp.o \ 31nouveau-y += core/subdev/bios/dcb.o
32 nv98_ppp.o \ 32nouveau-y += core/subdev/bios/dp.o
33 nv04_instmem.o nv50_instmem.o nvc0_instmem.o \ 33nouveau-y += core/subdev/bios/extdev.o
34 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ 34nouveau-y += core/subdev/bios/gpio.o
35 nv04_crtc.o nv04_display.o nv04_cursor.o \ 35nouveau-y += core/subdev/bios/i2c.o
36 nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \ 36nouveau-y += core/subdev/bios/init.o
37 nv50_cursor.o nv50_display.o \ 37nouveau-y += core/subdev/bios/mxm.o
38 nvd0_display.o \ 38nouveau-y += core/subdev/bios/perf.o
39 nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \ 39nouveau-y += core/subdev/bios/pll.o
40 nv10_gpio.o nv50_gpio.o \ 40nouveau-y += core/subdev/bios/therm.o
41 nv50_calc.o \ 41nouveau-y += core/subdev/clock/nv04.o
42 nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \ 42nouveau-y += core/subdev/clock/nv40.o
43 nv50_vram.o nvc0_vram.o \ 43nouveau-y += core/subdev/clock/nv50.o
44 nv50_vm.o nvc0_vm.o nouveau_prime.o 44nouveau-y += core/subdev/clock/nva3.o
45 45nouveau-y += core/subdev/clock/nvc0.o
46nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o 46nouveau-y += core/subdev/clock/pllnv04.o
47nouveau-y += core/subdev/clock/pllnva3.o
48nouveau-y += core/subdev/device/base.o
49nouveau-y += core/subdev/device/nv04.o
50nouveau-y += core/subdev/device/nv10.o
51nouveau-y += core/subdev/device/nv20.o
52nouveau-y += core/subdev/device/nv30.o
53nouveau-y += core/subdev/device/nv40.o
54nouveau-y += core/subdev/device/nv50.o
55nouveau-y += core/subdev/device/nvc0.o
56nouveau-y += core/subdev/device/nve0.o
57nouveau-y += core/subdev/devinit/base.o
58nouveau-y += core/subdev/devinit/nv04.o
59nouveau-y += core/subdev/devinit/nv05.o
60nouveau-y += core/subdev/devinit/nv10.o
61nouveau-y += core/subdev/devinit/nv1a.o
62nouveau-y += core/subdev/devinit/nv20.o
63nouveau-y += core/subdev/devinit/nv50.o
64nouveau-y += core/subdev/fb/base.o
65nouveau-y += core/subdev/fb/nv04.o
66nouveau-y += core/subdev/fb/nv10.o
67nouveau-y += core/subdev/fb/nv20.o
68nouveau-y += core/subdev/fb/nv30.o
69nouveau-y += core/subdev/fb/nv40.o
70nouveau-y += core/subdev/fb/nv50.o
71nouveau-y += core/subdev/fb/nvc0.o
72nouveau-y += core/subdev/gpio/base.o
73nouveau-y += core/subdev/gpio/nv10.o
74nouveau-y += core/subdev/gpio/nv50.o
75nouveau-y += core/subdev/gpio/nvd0.o
76nouveau-y += core/subdev/i2c/base.o
77nouveau-y += core/subdev/i2c/aux.o
78nouveau-y += core/subdev/i2c/bit.o
79nouveau-y += core/subdev/ibus/nvc0.o
80nouveau-y += core/subdev/ibus/nve0.o
81nouveau-y += core/subdev/instmem/base.o
82nouveau-y += core/subdev/instmem/nv04.o
83nouveau-y += core/subdev/instmem/nv40.o
84nouveau-y += core/subdev/instmem/nv50.o
85nouveau-y += core/subdev/ltcg/nvc0.o
86nouveau-y += core/subdev/mc/base.o
87nouveau-y += core/subdev/mc/nv04.o
88nouveau-y += core/subdev/mc/nv44.o
89nouveau-y += core/subdev/mc/nv50.o
90nouveau-y += core/subdev/mc/nv98.o
91nouveau-y += core/subdev/mc/nvc0.o
92nouveau-y += core/subdev/mxm/base.o
93nouveau-y += core/subdev/mxm/mxms.o
94nouveau-y += core/subdev/mxm/nv50.o
95nouveau-y += core/subdev/therm/base.o
96nouveau-y += core/subdev/therm/fan.o
97nouveau-y += core/subdev/therm/ic.o
98nouveau-y += core/subdev/therm/nv40.o
99nouveau-y += core/subdev/therm/nv50.o
100nouveau-y += core/subdev/therm/temp.o
101nouveau-y += core/subdev/timer/base.o
102nouveau-y += core/subdev/timer/nv04.o
103nouveau-y += core/subdev/vm/base.o
104nouveau-y += core/subdev/vm/nv04.o
105nouveau-y += core/subdev/vm/nv41.o
106nouveau-y += core/subdev/vm/nv44.o
107nouveau-y += core/subdev/vm/nv50.o
108nouveau-y += core/subdev/vm/nvc0.o
109
110nouveau-y += core/engine/dmaobj/base.o
111nouveau-y += core/engine/dmaobj/nv04.o
112nouveau-y += core/engine/dmaobj/nv50.o
113nouveau-y += core/engine/dmaobj/nvc0.o
114nouveau-y += core/engine/bsp/nv84.o
115nouveau-y += core/engine/copy/nva3.o
116nouveau-y += core/engine/copy/nvc0.o
117nouveau-y += core/engine/copy/nve0.o
118nouveau-y += core/engine/crypt/nv84.o
119nouveau-y += core/engine/crypt/nv98.o
120nouveau-y += core/engine/disp/nv04.o
121nouveau-y += core/engine/disp/nv50.o
122nouveau-y += core/engine/disp/nvd0.o
123nouveau-y += core/engine/disp/vga.o
124nouveau-y += core/engine/fifo/base.o
125nouveau-y += core/engine/fifo/nv04.o
126nouveau-y += core/engine/fifo/nv10.o
127nouveau-y += core/engine/fifo/nv17.o
128nouveau-y += core/engine/fifo/nv40.o
129nouveau-y += core/engine/fifo/nv50.o
130nouveau-y += core/engine/fifo/nv84.o
131nouveau-y += core/engine/fifo/nvc0.o
132nouveau-y += core/engine/fifo/nve0.o
133nouveau-y += core/engine/graph/ctxnv40.o
134nouveau-y += core/engine/graph/ctxnv50.o
135nouveau-y += core/engine/graph/ctxnvc0.o
136nouveau-y += core/engine/graph/ctxnve0.o
137nouveau-y += core/engine/graph/nv04.o
138nouveau-y += core/engine/graph/nv10.o
139nouveau-y += core/engine/graph/nv20.o
140nouveau-y += core/engine/graph/nv25.o
141nouveau-y += core/engine/graph/nv2a.o
142nouveau-y += core/engine/graph/nv30.o
143nouveau-y += core/engine/graph/nv34.o
144nouveau-y += core/engine/graph/nv35.o
145nouveau-y += core/engine/graph/nv40.o
146nouveau-y += core/engine/graph/nv50.o
147nouveau-y += core/engine/graph/nvc0.o
148nouveau-y += core/engine/graph/nve0.o
149nouveau-y += core/engine/mpeg/nv31.o
150nouveau-y += core/engine/mpeg/nv40.o
151nouveau-y += core/engine/mpeg/nv50.o
152nouveau-y += core/engine/mpeg/nv84.o
153nouveau-y += core/engine/ppp/nv98.o
154nouveau-y += core/engine/software/nv04.o
155nouveau-y += core/engine/software/nv10.o
156nouveau-y += core/engine/software/nv50.o
157nouveau-y += core/engine/software/nvc0.o
158nouveau-y += core/engine/vp/nv84.o
159
160# drm/core
161nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
162nouveau-y += nouveau_irq.o nouveau_vga.o nouveau_agp.o
163nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
164nouveau-y += nouveau_prime.o nouveau_abi16.o
165nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
166
167# drm/kms
168nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
169nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o
170nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
171
172# drm/kms/nv04:nv50
173nouveau-y += nouveau_hw.o nouveau_calc.o
174nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
175nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
176
177# drm/kms/nv50-
178nouveau-y += nv50_display.o nvd0_display.o
179nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
180nouveau-y += nv50_evo.o
181
182# drm/pm
183nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
184nouveau-y += nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o
185nouveau-y += nouveau_mem.o
186
187# other random bits
47nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o 188nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
48nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
49nouveau-$(CONFIG_ACPI) += nouveau_acpi.o 189nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
190nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
50 191
51obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o 192obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
new file mode 100644
index 000000000000..c617f0480071
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -0,0 +1,103 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/client.h>
27#include <core/handle.h>
28#include <core/option.h>
29
30#include <subdev/device.h>
31
32static void
33nouveau_client_dtor(struct nouveau_object *object)
34{
35 struct nouveau_client *client = (void *)object;
36 nouveau_object_ref(NULL, &client->device);
37 nouveau_handle_destroy(client->root);
38 nouveau_namedb_destroy(&client->base);
39}
40
41static struct nouveau_oclass
42nouveau_client_oclass = {
43 .ofuncs = &(struct nouveau_ofuncs) {
44 .dtor = nouveau_client_dtor,
45 },
46};
47
48int
49nouveau_client_create_(const char *name, u64 devname, const char *cfg,
50 const char *dbg, int length, void **pobject)
51{
52 struct nouveau_object *device;
53 struct nouveau_client *client;
54 int ret;
55
56 device = (void *)nouveau_device_find(devname);
57 if (!device)
58 return -ENODEV;
59
60 ret = nouveau_namedb_create_(NULL, NULL, &nouveau_client_oclass,
61 NV_CLIENT_CLASS, nouveau_device_sclass,
62 0, length, pobject);
63 client = *pobject;
64 if (ret)
65 return ret;
66
67 ret = nouveau_handle_create(nv_object(client), ~0, ~0,
68 nv_object(client), &client->root);
69 if (ret) {
70 nouveau_namedb_destroy(&client->base);
71 return ret;
72 }
73
74 /* prevent init/fini being called, the OS is in charge of this */
75 atomic_set(&nv_object(client)->usecount, 2);
76
77 nouveau_object_ref(device, &client->device);
78 snprintf(client->name, sizeof(client->name), "%s", name);
79 client->debug = nouveau_dbgopt(dbg, "CLIENT");
80 return 0;
81}
82
83int
84nouveau_client_init(struct nouveau_client *client)
85{
86 int ret;
87 nv_debug(client, "init running\n");
88 ret = nouveau_handle_init(client->root);
89 nv_debug(client, "init completed with %d\n", ret);
90 return ret;
91}
92
93int
94nouveau_client_fini(struct nouveau_client *client, bool suspend)
95{
96 const char *name[2] = { "fini", "suspend" };
97 int ret;
98
99 nv_debug(client, "%s running\n", name[suspend]);
100 ret = nouveau_handle_fini(client->root, suspend);
101 nv_debug(client, "%s completed with %d\n", name[suspend], ret);
102 return ret;
103}
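Taken together, the three entry points above define the client lifecycle: create registers the client against a device, init brings up its handle tree, and fini tears it down (or suspends it). A sketch of a caller using only the signatures shown; device_id, config and debug are placeholder arguments:

	struct nouveau_client *client;
	int ret;

	ret = nouveau_client_create_("drm", device_id, config, debug,
				     sizeof(*client), (void **)&client);
	if (ret)
		return ret;

	ret = nouveau_client_init(client);	/* runs the root handle's init */
	if (ret)
		return ret;

	/* ... use the client ... */

	nouveau_client_fini(client, false);	/* false = teardown, true = suspend */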
diff --git a/drivers/gpu/drm/nouveau/core/core/engctx.c b/drivers/gpu/drm/nouveau/core/core/engctx.c
new file mode 100644
index 000000000000..e41b10d5eb59
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/engctx.c
@@ -0,0 +1,236 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/namedb.h>
27#include <core/handle.h>
28#include <core/client.h>
29#include <core/engctx.h>
30
31#include <subdev/vm.h>
32
33static inline int
34nouveau_engctx_exists(struct nouveau_object *parent,
35 struct nouveau_engine *engine, void **pobject)
36{
37 struct nouveau_engctx *engctx;
38 struct nouveau_object *parctx;
39
40 list_for_each_entry(engctx, &engine->contexts, head) {
41 parctx = nv_pclass(nv_object(engctx), NV_PARENT_CLASS);
42 if (parctx == parent) {
43 atomic_inc(&nv_object(engctx)->refcount);
44 *pobject = engctx;
45 return 1;
46 }
47 }
48
49 return 0;
50}
51
52int
53nouveau_engctx_create_(struct nouveau_object *parent,
54 struct nouveau_object *engobj,
55 struct nouveau_oclass *oclass,
56 struct nouveau_object *pargpu,
57 u32 size, u32 align, u32 flags,
58 int length, void **pobject)
59{
60 struct nouveau_client *client = nouveau_client(parent);
61 struct nouveau_engine *engine = nv_engine(engobj);
62 struct nouveau_object *engctx;
63 unsigned long save;
64 int ret;
65
66 /* check if this engine already has a context for the parent object,
67 * and reference it instead of creating a new one
68 */
69 spin_lock_irqsave(&engine->lock, save);
70 ret = nouveau_engctx_exists(parent, engine, pobject);
71 spin_unlock_irqrestore(&engine->lock, save);
72 if (ret)
73 return ret;
74
75 /* create the new context; both raw objects and objects backed by
76 * instance memory are supported
77 */
78 if (size) {
79 ret = nouveau_gpuobj_create_(parent, engobj, oclass,
80 NV_ENGCTX_CLASS,
81 pargpu, size, align, flags,
82 length, pobject);
83 } else {
84 ret = nouveau_object_create_(parent, engobj, oclass,
85 NV_ENGCTX_CLASS, length, pobject);
86 }
87
88 engctx = *pobject;
89 if (ret)
90 return ret;
91
92 /* must take the lock again and re-check that a context doesn't already
93 * exist (in case of a race) - the lock had to be dropped before as
94 * it's not possible to allocate the object with it held.
95 */
96 spin_lock_irqsave(&engine->lock, save);
97 ret = nouveau_engctx_exists(parent, engine, pobject);
98 if (ret) {
99 spin_unlock_irqrestore(&engine->lock, save);
100 nouveau_object_ref(NULL, &engctx);
101 return ret;
102 }
103
104 if (client->vm)
105 atomic_inc(&client->vm->engref[nv_engidx(engobj)]);
106 list_add(&nv_engctx(engctx)->head, &engine->contexts);
107 nv_engctx(engctx)->addr = ~0ULL;
108 spin_unlock_irqrestore(&engine->lock, save);
109 return 0;
110}
111
112void
113nouveau_engctx_destroy(struct nouveau_engctx *engctx)
114{
115 struct nouveau_object *engobj = nv_object(engctx)->engine;
116 struct nouveau_engine *engine = nv_engine(engobj);
117 struct nouveau_client *client = nouveau_client(engctx);
118 unsigned long save;
119
120 nouveau_gpuobj_unmap(&engctx->vma);
121 spin_lock_irqsave(&engine->lock, save);
122 list_del(&engctx->head);
123 spin_unlock_irqrestore(&engine->lock, save);
124
125 if (client->vm)
126 atomic_dec(&client->vm->engref[nv_engidx(engobj)]);
127
128 if (engctx->base.size)
129 nouveau_gpuobj_destroy(&engctx->base);
130 else
131 nouveau_object_destroy(&engctx->base.base);
132}
133
134int
135nouveau_engctx_init(struct nouveau_engctx *engctx)
136{
137 struct nouveau_object *object = nv_object(engctx);
138 struct nouveau_subdev *subdev = nv_subdev(object->engine);
139 struct nouveau_object *parent;
140 struct nouveau_subdev *pardev;
141 int ret;
142
143 ret = nouveau_gpuobj_init(&engctx->base);
144 if (ret)
145 return ret;
146
147 parent = nv_pclass(object->parent, NV_PARENT_CLASS);
148 pardev = nv_subdev(parent->engine);
149 if (nv_parent(parent)->context_attach) {
150 mutex_lock(&pardev->mutex);
151 ret = nv_parent(parent)->context_attach(parent, object);
152 mutex_unlock(&pardev->mutex);
153 }
154
155 if (ret) {
156 nv_error(parent, "failed to attach %s context, %d\n",
157 subdev->name, ret);
158 return ret;
159 }
160
161 nv_debug(parent, "attached %s context\n", subdev->name);
162 return 0;
163}
164
165int
166nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
167{
168 struct nouveau_object *object = nv_object(engctx);
169 struct nouveau_subdev *subdev = nv_subdev(object->engine);
170 struct nouveau_object *parent;
171 struct nouveau_subdev *pardev;
172 int ret = 0;
173
174 parent = nv_pclass(object->parent, NV_PARENT_CLASS);
175 pardev = nv_subdev(parent->engine);
176 if (nv_parent(parent)->context_detach) {
177 mutex_lock(&pardev->mutex);
178 ret = nv_parent(parent)->context_detach(parent, suspend, object);
179 mutex_unlock(&pardev->mutex);
180 }
181
182 if (ret) {
183 nv_error(parent, "failed to detach %s context, %d\n",
184 subdev->name, ret);
185 return ret;
186 }
187
188 nv_debug(parent, "detached %s context\n", subdev->name);
189 return nouveau_gpuobj_fini(&engctx->base, suspend);
190}
191
192void
193_nouveau_engctx_dtor(struct nouveau_object *object)
194{
195 nouveau_engctx_destroy(nv_engctx(object));
196}
197
198int
199_nouveau_engctx_init(struct nouveau_object *object)
200{
201 return nouveau_engctx_init(nv_engctx(object));
202}
203
204
205int
206_nouveau_engctx_fini(struct nouveau_object *object, bool suspend)
207{
208 return nouveau_engctx_fini(nv_engctx(object), suspend);
209}
210
211struct nouveau_object *
212nouveau_engctx_get(struct nouveau_engine *engine, u64 addr)
213{
214 struct nouveau_engctx *engctx;
215 unsigned long flags;
216
217 spin_lock_irqsave(&engine->lock, flags);
218 list_for_each_entry(engctx, &engine->contexts, head) {
219 if (engctx->addr == addr) {
220 engctx->save = flags;
221 return nv_object(engctx);
222 }
223 }
224 spin_unlock_irqrestore(&engine->lock, flags);
225 return NULL;
226}
227
228void
229nouveau_engctx_put(struct nouveau_object *object)
230{
231 if (object) {
232 struct nouveau_engine *engine = nv_engine(object->engine);
233 struct nouveau_engctx *engctx = nv_engctx(object);
234 spin_unlock_irqrestore(&engine->lock, engctx->save);
235 }
236}
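nouveau_engctx_create_() above is a textbook optimistic-allocation pattern: look up under the spinlock, drop the lock to allocate (the object cannot be allocated with the IRQ-disabled lock held), then retake the lock and re-check before inserting, discarding the new object if another thread won the race. Reduced to its skeleton, with find_ctx()/alloc_ctx()/free_ctx() standing in for the engctx specifics:

	spin_lock_irqsave(&engine->lock, flags);
	ctx = find_ctx(engine, parent);		/* fast path: already exists */
	spin_unlock_irqrestore(&engine->lock, flags);
	if (ctx)
		return ctx;

	new = alloc_ctx(parent);		/* may sleep; must not hold the lock */

	spin_lock_irqsave(&engine->lock, flags);
	ctx = find_ctx(engine, parent);		/* re-check: we may have raced */
	if (!ctx)
		list_add(&new->head, &engine->contexts);
	spin_unlock_irqrestore(&engine->lock, flags);

	if (ctx) {				/* lost the race: drop our copy */
		free_ctx(new);
		return ctx;
	}
	return new;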
diff --git a/drivers/gpu/drm/nouveau/core/core/engine.c b/drivers/gpu/drm/nouveau/core/core/engine.c
new file mode 100644
index 000000000000..09b3bd502fd0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/engine.c
@@ -0,0 +1,55 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/device.h>
26#include <core/engine.h>
27#include <core/option.h>
28
29int
30nouveau_engine_create_(struct nouveau_object *parent,
31 struct nouveau_object *engobj,
32 struct nouveau_oclass *oclass, bool enable,
33 const char *iname, const char *fname,
34 int length, void **pobject)
35{
36 struct nouveau_device *device = nv_device(parent);
37 struct nouveau_engine *engine;
38 int ret;
39
40 ret = nouveau_subdev_create_(parent, engobj, oclass, NV_ENGINE_CLASS,
41 iname, fname, length, pobject);
42 engine = *pobject;
43 if (ret)
44 return ret;
45
46 if (!nouveau_boolopt(device->cfgopt, iname, enable)) {
47 if (!enable)
48 nv_warn(engine, "disabled, %s=1 to enable\n", iname);
49 return -ENODEV;
50 }
51
52 INIT_LIST_HEAD(&engine->contexts);
53 spin_lock_init(&engine->lock);
54 return 0;
55}
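nouveau_engine_create_() folds the per-engine enable knob into construction: unless the device's config string enables the engine (or it defaults to enabled), construction fails with -ENODEV instead of leaving a half-built engine behind. A sketch of a constructor built on it; the "PGRAPH"/"graphics" names are placeholders, and only the helper's signature comes from the code above:

	struct nouveau_engine *engine;
	int ret;

	ret = nouveau_engine_create_(parent, engobj, oclass, true,
				     "PGRAPH", "graphics",
				     sizeof(*engine), (void **)&engine);
	if (ret)
		return ret;	/* -ENODEV if disabled via the config string */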
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/core/core/enum.c
index e51b51503baa..7cc7133d82de 100644
--- a/drivers/gpu/drm/nouveau/nouveau_util.c
+++ b/drivers/gpu/drm/nouveau/core/core/enum.c
@@ -25,27 +25,8 @@
25 * 25 *
26 */ 26 */
27 27
28#include <linux/ratelimit.h> 28#include <core/os.h>
29 29#include <core/enum.h>
30#include "nouveau_util.h"
31
32static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
33
34void
35nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
36{
37 while (bf->name) {
38 if (value & bf->mask) {
39 printk(" %s", bf->name);
40 value &= ~bf->mask;
41 }
42
43 bf++;
44 }
45
46 if (value)
47 printk(" (unknown bits 0x%08x)", value);
48}
49 30
50const struct nouveau_enum * 31const struct nouveau_enum *
51nouveau_enum_find(const struct nouveau_enum *en, u32 value) 32nouveau_enum_find(const struct nouveau_enum *en, u32 value)
@@ -63,16 +44,24 @@ void
63nouveau_enum_print(const struct nouveau_enum *en, u32 value) 44nouveau_enum_print(const struct nouveau_enum *en, u32 value)
64{ 45{
65 en = nouveau_enum_find(en, value); 46 en = nouveau_enum_find(en, value);
66 if (en) { 47 if (en)
67 printk("%s", en->name); 48 printk("%s", en->name);
68 return; 49 else
69 } 50 printk("(unknown enum 0x%08x)", value);
70
71 printk("(unknown enum 0x%08x)", value);
72} 51}
73 52
74int 53void
75nouveau_ratelimit(void) 54nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
76{ 55{
77 return __ratelimit(&nouveau_ratelimit_state); 56 while (bf->name) {
57 if (value & bf->mask) {
58 printk(" %s", bf->name);
59 value &= ~bf->mask;
60 }
61
62 bf++;
63 }
64
65 if (value)
66 printk(" (unknown bits 0x%08x)", value);
78} 67}
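nouveau_bitfield_print() walks its table until an entry with a NULL name, printing each set bit it recognises, clearing it, and reporting whatever bits remain unknown at the end. A usage sketch; the { mask, name } entry layout is inferred from the loop above, and the bit names are invented for illustration:

static const struct nouveau_bitfield fault_bits_sketch[] = {
	{ 0x00000001, "PAGE_NOT_PRESENT" },
	{ 0x00000002, "WRITE_PROTECT" },
	{}	/* NULL name terminates the walk */
};

	/* Prints each known set bit by name, then any unknown remainder. */
	nouveau_bitfield_print(fault_bits_sketch, status);
	printk("\n");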
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
new file mode 100644
index 000000000000..1f34549aff18
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -0,0 +1,318 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/gpuobj.h>
27
28#include <subdev/instmem.h>
29#include <subdev/bar.h>
30#include <subdev/vm.h>
31
32void
33nouveau_gpuobj_destroy(struct nouveau_gpuobj *gpuobj)
34{
35 int i;
36
37 if (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE) {
38 for (i = 0; i < gpuobj->size; i += 4)
39 nv_wo32(gpuobj, i, 0x00000000);
40 }
41
42 if (gpuobj->heap.block_size)
43 nouveau_mm_fini(&gpuobj->heap);
44
45 nouveau_object_destroy(&gpuobj->base);
46}
47
48int
49nouveau_gpuobj_create_(struct nouveau_object *parent,
50 struct nouveau_object *engine,
51 struct nouveau_oclass *oclass, u32 pclass,
52 struct nouveau_object *pargpu,
53 u32 size, u32 align, u32 flags,
54 int length, void **pobject)
55{
56 struct nouveau_instmem *imem = nouveau_instmem(parent);
57 struct nouveau_bar *bar = nouveau_bar(parent);
58 struct nouveau_gpuobj *gpuobj;
59 struct nouveau_mm *heap = NULL;
60 int ret, i;
61 u64 addr;
62
63 *pobject = NULL;
64
65 if (pargpu) {
66 while ((pargpu = nv_pclass(pargpu, NV_GPUOBJ_CLASS))) {
67 if (nv_gpuobj(pargpu)->heap.block_size)
68 break;
69 pargpu = pargpu->parent;
70 }
71
72 if (unlikely(pargpu == NULL)) {
73 nv_error(parent, "no gpuobj heap\n");
74 return -EINVAL;
75 }
76
77 addr = nv_gpuobj(pargpu)->addr;
78 heap = &nv_gpuobj(pargpu)->heap;
79 atomic_inc(&parent->refcount);
80 } else {
81 ret = imem->alloc(imem, parent, size, align, &parent);
82 pargpu = parent;
83 if (ret)
84 return ret;
85
86 addr = nv_memobj(pargpu)->addr;
87 size = nv_memobj(pargpu)->size;
88
89 if (bar && bar->alloc) {
90 struct nouveau_instobj *iobj = (void *)parent;
91 struct nouveau_mem **mem = (void *)(iobj + 1);
92 struct nouveau_mem *node = *mem;
93 if (!bar->alloc(bar, parent, node, &pargpu)) {
94 nouveau_object_ref(NULL, &parent);
95 parent = pargpu;
96 }
97 }
98 }
99
100 ret = nouveau_object_create_(parent, engine, oclass, pclass |
101 NV_GPUOBJ_CLASS, length, pobject);
102 nouveau_object_ref(NULL, &parent);
103 gpuobj = *pobject;
104 if (ret)
105 return ret;
106
107 gpuobj->parent = pargpu;
108 gpuobj->flags = flags;
109 gpuobj->addr = addr;
110 gpuobj->size = size;
111
112 if (heap) {
113 ret = nouveau_mm_head(heap, 1, size, size,
114 max(align, (u32)1), &gpuobj->node);
115 if (ret)
116 return ret;
117
118 gpuobj->addr += gpuobj->node->offset;
119 }
120
121 if (gpuobj->flags & NVOBJ_FLAG_HEAP) {
122 ret = nouveau_mm_init(&gpuobj->heap, 0, gpuobj->size, 1);
123 if (ret)
124 return ret;
125 }
126
127 if (flags & NVOBJ_FLAG_ZERO_ALLOC) {
128 for (i = 0; i < gpuobj->size; i += 4)
129 nv_wo32(gpuobj, i, 0x00000000);
130 }
131
132 return ret;
133}
134
135struct nouveau_gpuobj_class {
136 struct nouveau_object *pargpu;
137 u64 size;
138 u32 align;
139 u32 flags;
140};
141
142static int
143_nouveau_gpuobj_ctor(struct nouveau_object *parent,
144 struct nouveau_object *engine,
145 struct nouveau_oclass *oclass, void *data, u32 size,
146 struct nouveau_object **pobject)
147{
148 struct nouveau_gpuobj_class *args = data;
149 struct nouveau_gpuobj *object;
150 int ret;
151
152 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, args->pargpu,
153 args->size, args->align, args->flags,
154 &object);
155 *pobject = nv_object(object);
156 if (ret)
157 return ret;
158
159 return 0;
160}
161
162void
163_nouveau_gpuobj_dtor(struct nouveau_object *object)
164{
165 nouveau_gpuobj_destroy(nv_gpuobj(object));
166}
167
168int
169_nouveau_gpuobj_init(struct nouveau_object *object)
170{
171 return nouveau_gpuobj_init(nv_gpuobj(object));
172}
173
174int
175_nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
176{
177 return nouveau_gpuobj_fini(nv_gpuobj(object), suspend);
178}
179
180u32
181_nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
182{
183 struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
184 struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
185 if (gpuobj->node)
186 addr += gpuobj->node->offset;
187 return pfuncs->rd32(gpuobj->parent, addr);
188}
189
190void
191_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
192{
193 struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
194 struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
195 if (gpuobj->node)
196 addr += gpuobj->node->offset;
197 pfuncs->wr32(gpuobj->parent, addr, data);
198}
199
200static struct nouveau_oclass
201_nouveau_gpuobj_oclass = {
202 .handle = 0x00000000,
203 .ofuncs = &(struct nouveau_ofuncs) {
204 .ctor = _nouveau_gpuobj_ctor,
205 .dtor = _nouveau_gpuobj_dtor,
206 .init = _nouveau_gpuobj_init,
207 .fini = _nouveau_gpuobj_fini,
208 .rd32 = _nouveau_gpuobj_rd32,
209 .wr32 = _nouveau_gpuobj_wr32,
210 },
211};
212
213int
214nouveau_gpuobj_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
215 u32 size, u32 align, u32 flags,
216 struct nouveau_gpuobj **pgpuobj)
217{
218 struct nouveau_object *engine = parent;
219 struct nouveau_gpuobj_class args = {
220 .pargpu = pargpu,
221 .size = size,
222 .align = align,
223 .flags = flags,
224 };
225
226 if (!nv_iclass(engine, NV_SUBDEV_CLASS))
227 engine = engine->engine;
228 BUG_ON(engine == NULL);
229
230 return nouveau_object_ctor(parent, engine, &_nouveau_gpuobj_oclass,
231 &args, sizeof(args),
232 (struct nouveau_object **)pgpuobj);
233}
234
235int
236nouveau_gpuobj_map(struct nouveau_gpuobj *gpuobj, u32 access,
237 struct nouveau_vma *vma)
238{
239 struct nouveau_bar *bar = nouveau_bar(gpuobj);
240 int ret = -EINVAL;
241
242 if (bar && bar->umap) {
243 struct nouveau_instobj *iobj = (void *)
244 nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
245 struct nouveau_mem **mem = (void *)(iobj + 1);
246 ret = bar->umap(bar, *mem, access, vma);
247 }
248
249 return ret;
250}
251
252int
253nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm,
254 u32 access, struct nouveau_vma *vma)
255{
256 struct nouveau_instobj *iobj = (void *)
257 nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
258 struct nouveau_mem **mem = (void *)(iobj + 1);
259 int ret;
260
261 ret = nouveau_vm_get(vm, gpuobj->size, 12, access, vma);
262 if (ret)
263 return ret;
264
265 nouveau_vm_map(vma, *mem);
266 return 0;
267}
268
269void
270nouveau_gpuobj_unmap(struct nouveau_vma *vma)
271{
272 if (vma->node) {
273 nouveau_vm_unmap(vma);
274 nouveau_vm_put(vma);
275 }
276}
277
278/* the below is basically only here to support sharing the paged dma object
279 * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work
280 * anywhere else.
281 */
282
283static void
284nouveau_gpudup_dtor(struct nouveau_object *object)
285{
286 struct nouveau_gpuobj *gpuobj = (void *)object;
287 nouveau_object_ref(NULL, &gpuobj->parent);
288 nouveau_object_destroy(&gpuobj->base);
289}
290
291static struct nouveau_oclass
292nouveau_gpudup_oclass = {
293 .handle = NV_GPUOBJ_CLASS,
294 .ofuncs = &(struct nouveau_ofuncs) {
295 .dtor = nouveau_gpudup_dtor,
296 .init = nouveau_object_init,
297 .fini = nouveau_object_fini,
298 },
299};
300
301int
302nouveau_gpuobj_dup(struct nouveau_object *parent, struct nouveau_gpuobj *base,
303 struct nouveau_gpuobj **pgpuobj)
304{
305 struct nouveau_gpuobj *gpuobj;
306 int ret;
307
308 ret = nouveau_object_create(parent, parent->engine,
309 &nouveau_gpudup_oclass, 0, &gpuobj);
310 *pgpuobj = gpuobj;
311 if (ret)
312 return ret;
313
314 nouveau_object_ref(nv_object(base), &gpuobj->parent);
315 gpuobj->addr = base->addr;
316 gpuobj->size = base->size;
317 return 0;
318}
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
new file mode 100644
index 000000000000..b8d2cbf8a7a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -0,0 +1,223 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/handle.h>
27#include <core/client.h>
28
29#define hprintk(h,l,f,a...) do { \
30 struct nouveau_client *c = nouveau_client((h)->object); \
31 struct nouveau_handle *p = (h)->parent; u32 n = p ? p->name : ~0; \
32 nv_printk((c), l, "0x%08x:0x%08x "f, n, (h)->name, ##a); \
33} while(0)
34
35int
36nouveau_handle_init(struct nouveau_handle *handle)
37{
38 struct nouveau_handle *item;
39 int ret;
40
41 hprintk(handle, TRACE, "init running\n");
42 ret = nouveau_object_inc(handle->object);
43 if (ret)
44 return ret;
45
46 hprintk(handle, TRACE, "init children\n");
47 list_for_each_entry(item, &handle->tree, head) {
48 ret = nouveau_handle_init(item);
49 if (ret)
50 goto fail;
51 }
52
53 hprintk(handle, TRACE, "init completed\n");
54 return 0;
55fail:
56 hprintk(handle, ERROR, "init failed with %d\n", ret);
57 list_for_each_entry_continue_reverse(item, &handle->tree, head) {
58 nouveau_handle_fini(item, false);
59 }
60
61 nouveau_object_dec(handle->object, false);
62 return ret;
63}
64
65int
66nouveau_handle_fini(struct nouveau_handle *handle, bool suspend)
67{
68 static char *name[2] = { "fini", "suspend" };
69 struct nouveau_handle *item;
70 int ret;
71
72 hprintk(handle, TRACE, "%s children\n", name[suspend]);
73 list_for_each_entry(item, &handle->tree, head) {
74 ret = nouveau_handle_fini(item, suspend);
75 if (ret && suspend)
76 goto fail;
77 }
78
79 hprintk(handle, TRACE, "%s running\n", name[suspend]);
80 if (handle->object) {
81 ret = nouveau_object_dec(handle->object, suspend);
82 if (ret && suspend)
83 goto fail;
84 }
85
86 hprintk(handle, TRACE, "%s completed\n", name[suspend]);
87 return 0;
88fail:
89 hprintk(handle, ERROR, "%s failed with %d\n", name[suspend], ret);
90 list_for_each_entry_continue_reverse(item, &handle->tree, head) {
91 int rret = nouveau_handle_init(item);
92 if (rret)
93 hprintk(handle, FATAL, "failed to restart, %d\n", rret);
94 }
95
96 return ret;
97}
98
99int
100nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
101 struct nouveau_object *object,
102 struct nouveau_handle **phandle)
103{
104 struct nouveau_object *namedb;
105 struct nouveau_handle *handle;
106 int ret;
107
108 namedb = parent;
109 while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
110 namedb = namedb->parent;
111
112 handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL);
113 if (!handle)
114 return -ENOMEM;
115
116 INIT_LIST_HEAD(&handle->head);
117 INIT_LIST_HEAD(&handle->tree);
118 handle->name = _handle;
119 handle->priv = ~0;
120
121 ret = nouveau_namedb_insert(nv_namedb(namedb), _handle, object, handle);
122 if (ret) {
123 kfree(handle);
124 return ret;
125 }
126
127 if (nv_parent(parent)->object_attach) {
128 ret = nv_parent(parent)->object_attach(parent, object, _handle);
129 if (ret < 0) {
130 nouveau_handle_destroy(handle);
131 return ret;
132 }
133
134 handle->priv = ret;
135 }
136
137 if (object != namedb) {
138 while (!nv_iclass(namedb, NV_CLIENT_CLASS))
139 namedb = namedb->parent;
140
141 handle->parent = nouveau_namedb_get(nv_namedb(namedb), _parent);
142 if (handle->parent) {
143 list_add(&handle->head, &handle->parent->tree);
144 nouveau_namedb_put(handle->parent);
145 }
146 }
147
148 hprintk(handle, TRACE, "created\n");
149 return 0;
150}
151
152void
153nouveau_handle_destroy(struct nouveau_handle *handle)
154{
155 struct nouveau_handle *item, *temp;
156
157 hprintk(handle, TRACE, "destroy running\n");
158 list_for_each_entry_safe(item, temp, &handle->tree, head) {
159 nouveau_handle_destroy(item);
160 }
161 list_del(&handle->head);
162
163 if (handle->priv != ~0) {
164 struct nouveau_object *parent = handle->parent->object;
165 nv_parent(parent)->object_detach(parent, handle->priv);
166 }
167
168 hprintk(handle, TRACE, "destroy completed\n");
169 nouveau_namedb_remove(handle);
170 kfree(handle);
171}
172
173struct nouveau_object *
174nouveau_handle_ref(struct nouveau_object *parent, u32 name)
175{
176 struct nouveau_object *object = NULL;
177 struct nouveau_handle *handle;
178
179 while (!nv_iclass(parent, NV_NAMEDB_CLASS))
180 parent = parent->parent;
181
182 handle = nouveau_namedb_get(nv_namedb(parent), name);
183 if (handle) {
184 nouveau_object_ref(handle->object, &object);
185 nouveau_namedb_put(handle);
186 }
187
188 return object;
189}
190
191struct nouveau_handle *
192nouveau_handle_get_class(struct nouveau_object *engctx, u16 oclass)
193{
194 struct nouveau_namedb *namedb;
195 if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
196 return nouveau_namedb_get_class(namedb, oclass);
197 return NULL;
198}
199
200struct nouveau_handle *
201nouveau_handle_get_vinst(struct nouveau_object *engctx, u64 vinst)
202{
203 struct nouveau_namedb *namedb;
204 if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
205 return nouveau_namedb_get_vinst(namedb, vinst);
206 return NULL;
207}
208
209struct nouveau_handle *
210nouveau_handle_get_cinst(struct nouveau_object *engctx, u32 cinst)
211{
212 struct nouveau_namedb *namedb;
213 if (engctx && (namedb = (void *)nv_pclass(engctx, NV_NAMEDB_CLASS)))
214 return nouveau_namedb_get_cinst(namedb, cinst);
215 return NULL;
216}
217
218void
219nouveau_handle_put(struct nouveau_handle *handle)
220{
221 if (handle)
222 nouveau_namedb_put(handle);
223}
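Worth noting in nouveau_handle_init()/fini() above: on failure they unwind only the children that were already processed, which is exactly what list_for_each_entry_continue_reverse buys. A standalone sketch of that partial-unwind idiom over a plain array, with hypothetical init/fini callbacks (item 2 fails on purpose to show the rollback):

#include <stdio.h>

static int item_init(int i)
{
	return (i == 2) ? -1 : (printf("init %d\n", i), 0);
}

static void item_fini(int i)
{
	printf("fini %d\n", i);
}

static int init_all(int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = item_init(i);
		if (ret) {
			/* unwind only the items that were initialised */
			while (--i >= 0)
				item_fini(i);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	return init_all(4) ? 1 : 0;
}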
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index 3e98806dd76f..bfddf87926dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,20 +22,52 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
+#include "core/os.h"
+#include "core/mm.h"
 
-static inline void
-region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a)
+#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
+	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
+
+void
+nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis)
 {
-	list_del(&a->nl_entry);
-	list_del(&a->fl_entry);
-	kfree(a);
+	struct nouveau_mm_node *this = *pthis;
+
+	if (this) {
+		struct nouveau_mm_node *prev = node(this, prev);
+		struct nouveau_mm_node *next = node(this, next);
+
+		if (prev && prev->type == 0) {
+			prev->length += this->length;
+			list_del(&this->nl_entry);
+			kfree(this); this = prev;
+		}
+
+		if (next && next->type == 0) {
+			next->offset = this->offset;
+			next->length += this->length;
+			if (this->type == 0)
+				list_del(&this->fl_entry);
+			list_del(&this->nl_entry);
+			kfree(this); this = NULL;
+		}
+
+		if (this && this->type != 0) {
+			list_for_each_entry(prev, &mm->free, fl_entry) {
+				if (this->offset < prev->offset)
+					break;
+			}
+
+			list_add_tail(&this->fl_entry, &prev->fl_entry);
+			this->type = 0;
+		}
+	}
+
+	*pthis = NULL;
 }
 
 static struct nouveau_mm_node *
-region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
+region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
 {
 	struct nouveau_mm_node *b;
 
@@ -57,38 +89,12 @@ region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
 	return b;
 }
 
-#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
-	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
-
-void
-nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this)
-{
-	struct nouveau_mm_node *prev = node(this, prev);
-	struct nouveau_mm_node *next = node(this, next);
-
-	list_add(&this->fl_entry, &mm->free);
-	this->type = 0;
-
-	if (prev && prev->type == 0) {
-		prev->length += this->length;
-		region_put(mm, this);
-		this = prev;
-	}
-
-	if (next && next->type == 0) {
-		next->offset = this->offset;
-		next->length += this->length;
-		region_put(mm, this);
-	}
-}
-
 int
-nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
+nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
 	       u32 align, struct nouveau_mm_node **pnode)
 {
 	struct nouveau_mm_node *prev, *this, *next;
-	u32 min = size_nc ? size_nc : size;
-	u32 align_mask = align - 1;
+	u32 mask = align - 1;
 	u32 splitoff;
 	u32 s, e;
 
@@ -104,16 +110,86 @@ nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
 		if (next && next->type != type)
 			e = rounddown(e, mm->block_size);
 
-		s = (s + align_mask) & ~align_mask;
-		e &= ~align_mask;
-		if (s > e || e - s < min)
+		s = (s + mask) & ~mask;
+		e &= ~mask;
+		if (s > e || e - s < size_min)
 			continue;
 
 		splitoff = s - this->offset;
-		if (splitoff && !region_split(mm, this, splitoff))
+		if (splitoff && !region_head(mm, this, splitoff))
+			return -ENOMEM;
+
+		this = region_head(mm, this, min(size_max, e - s));
+		if (!this)
+			return -ENOMEM;
+
+		this->type = type;
+		list_del(&this->fl_entry);
+		*pnode = this;
+		return 0;
+	}
+
+	return -ENOSPC;
+}
+
+static struct nouveau_mm_node *
+region_tail(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
+{
+	struct nouveau_mm_node *b;
+
+	if (a->length == size)
+		return a;
+
+	b = kmalloc(sizeof(*b), GFP_KERNEL);
+	if (unlikely(b == NULL))
+		return NULL;
+
+	a->length -= size;
+	b->offset = a->offset + a->length;
+	b->length = size;
+	b->type   = a->type;
+
+	list_add(&b->nl_entry, &a->nl_entry);
+	if (b->type == 0)
+		list_add(&b->fl_entry, &a->fl_entry);
+	return b;
+}
+
+int
+nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
+		u32 align, struct nouveau_mm_node **pnode)
+{
+	struct nouveau_mm_node *prev, *this, *next;
+	u32 mask = align - 1;
+
+	list_for_each_entry_reverse(this, &mm->free, fl_entry) {
+		u32 e = this->offset + this->length;
+		u32 s = this->offset;
+		u32 c = 0, a;
+
+		prev = node(this, prev);
+		if (prev && prev->type != type)
+			s = roundup(s, mm->block_size);
+
+		next = node(this, next);
+		if (next && next->type != type) {
+			e = rounddown(e, mm->block_size);
+			c = next->offset - e;
+		}
+
+		s = (s + mask) & ~mask;
+		a = e - s;
+		if (s > e || a < size_min)
+			continue;
+
+		a  = min(a, size_max);
+		s  = (e - a) & ~mask;
+		c += (e - s) - a;
+
+		if (c && !region_tail(mm, this, c))
 			return -ENOMEM;
 
-		this = region_split(mm, this, min(size, e - s));
+		this = region_tail(mm, this, a);
 		if (!this)
 			return -ENOMEM;
 
@@ -148,6 +224,7 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
 	list_add_tail(&node->nl_entry, &mm->nodes);
 	list_add_tail(&node->fl_entry, &mm->free);
 	mm->heap_nodes++;
+	mm->heap_size += length;
 	return 0;
 }
 
@@ -159,15 +236,8 @@ nouveau_mm_fini(struct nouveau_mm *mm)
 	int nodes = 0;
 
 	list_for_each_entry(node, &mm->nodes, nl_entry) {
-		if (nodes++ == mm->heap_nodes) {
-			printk(KERN_ERR "nouveau_mm in use at destroy time!\n");
-			list_for_each_entry(node, &mm->nodes, nl_entry) {
-				printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
-				       node->type, node->offset, node->length);
-			}
-			WARN_ON(1);
+		if (nodes++ == mm->heap_nodes)
 			return -EBUSY;
-		}
 	}
 
 	kfree(heap);
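The allocator above shrinks each candidate free region to the required alignment before testing whether it still satisfies size_min; the rounding is the usual power-of-two mask arithmetic. A worked standalone example of just that arithmetic (the region bounds and alignment are arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t s = 0x1003, e = 0x2001;	/* raw free-region bounds */
	uint32_t align = 0x100, mask = align - 1;

	s = (s + mask) & ~mask;			/* round start up:  0x1100 */
	e &= ~mask;				/* round end down:  0x2000 */
	printf("usable: 0x%x..0x%x (%u bytes)\n",
	       (unsigned)s, (unsigned)e, (unsigned)(e - s));
	return 0;
}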
diff --git a/drivers/gpu/drm/nouveau/core/core/namedb.c b/drivers/gpu/drm/nouveau/core/core/namedb.c
new file mode 100644
index 000000000000..1ce95a8709df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/namedb.c
@@ -0,0 +1,203 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/namedb.h>
27#include <core/handle.h>
28#include <core/gpuobj.h>
29
30static struct nouveau_handle *
31nouveau_namedb_lookup(struct nouveau_namedb *namedb, u32 name)
32{
33 struct nouveau_handle *handle;
34
35 list_for_each_entry(handle, &namedb->list, node) {
36 if (handle->name == name)
37 return handle;
38 }
39
40 return NULL;
41}
42
43static struct nouveau_handle *
44nouveau_namedb_lookup_class(struct nouveau_namedb *namedb, u16 oclass)
45{
46 struct nouveau_handle *handle;
47
48 list_for_each_entry(handle, &namedb->list, node) {
49 if (nv_mclass(handle->object) == oclass)
50 return handle;
51 }
52
53 return NULL;
54}
55
56static struct nouveau_handle *
57nouveau_namedb_lookup_vinst(struct nouveau_namedb *namedb, u64 vinst)
58{
59 struct nouveau_handle *handle;
60
61 list_for_each_entry(handle, &namedb->list, node) {
62 if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
63 if (nv_gpuobj(handle->object)->addr == vinst)
64 return handle;
65 }
66 }
67
68 return NULL;
69}
70
71static struct nouveau_handle *
72nouveau_namedb_lookup_cinst(struct nouveau_namedb *namedb, u32 cinst)
73{
74 struct nouveau_handle *handle;
75
76 list_for_each_entry(handle, &namedb->list, node) {
77 if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
78 if (nv_gpuobj(handle->object)->node &&
79 nv_gpuobj(handle->object)->node->offset == cinst)
80 return handle;
81 }
82 }
83
84 return NULL;
85}
86
87int
88nouveau_namedb_insert(struct nouveau_namedb *namedb, u32 name,
89 struct nouveau_object *object,
90 struct nouveau_handle *handle)
91{
92 int ret = -EEXIST;
93 write_lock_irq(&namedb->lock);
94 if (!nouveau_namedb_lookup(namedb, name)) {
95 nouveau_object_ref(object, &handle->object);
96 handle->namedb = namedb;
97 list_add(&handle->node, &namedb->list);
98 ret = 0;
99 }
100 write_unlock_irq(&namedb->lock);
101 return ret;
102}
103
104void
105nouveau_namedb_remove(struct nouveau_handle *handle)
106{
107 struct nouveau_namedb *namedb = handle->namedb;
108 struct nouveau_object *object = handle->object;
109 write_lock_irq(&namedb->lock);
110 list_del(&handle->node);
111 write_unlock_irq(&namedb->lock);
112 nouveau_object_ref(NULL, &object);
113}
114
115struct nouveau_handle *
116nouveau_namedb_get(struct nouveau_namedb *namedb, u32 name)
117{
118 struct nouveau_handle *handle;
119 read_lock(&namedb->lock);
120 handle = nouveau_namedb_lookup(namedb, name);
121 if (handle == NULL)
122 read_unlock(&namedb->lock);
123 return handle;
124}
125
126struct nouveau_handle *
127nouveau_namedb_get_class(struct nouveau_namedb *namedb, u16 oclass)
128{
129 struct nouveau_handle *handle;
130 read_lock(&namedb->lock);
131 handle = nouveau_namedb_lookup_class(namedb, oclass);
132 if (handle == NULL)
133 read_unlock(&namedb->lock);
134 return handle;
135}
136
137struct nouveau_handle *
138nouveau_namedb_get_vinst(struct nouveau_namedb *namedb, u64 vinst)
139{
140 struct nouveau_handle *handle;
141 read_lock(&namedb->lock);
142 handle = nouveau_namedb_lookup_vinst(namedb, vinst);
143 if (handle == NULL)
144 read_unlock(&namedb->lock);
145 return handle;
146}
147
148struct nouveau_handle *
149nouveau_namedb_get_cinst(struct nouveau_namedb *namedb, u32 cinst)
150{
151 struct nouveau_handle *handle;
152 read_lock(&namedb->lock);
153 handle = nouveau_namedb_lookup_cinst(namedb, cinst);
154 if (handle == NULL)
155 read_unlock(&namedb->lock);
156 return handle;
157}
158
159void
160nouveau_namedb_put(struct nouveau_handle *handle)
161{
162 if (handle)
163 read_unlock(&handle->namedb->lock);
164}
165
166int
167nouveau_namedb_create_(struct nouveau_object *parent,
168 struct nouveau_object *engine,
169 struct nouveau_oclass *oclass, u32 pclass,
170 struct nouveau_oclass *sclass, u32 engcls,
171 int length, void **pobject)
172{
173 struct nouveau_namedb *namedb;
174 int ret;
175
176 ret = nouveau_parent_create_(parent, engine, oclass, pclass |
177 NV_NAMEDB_CLASS, sclass, engcls,
178 length, pobject);
179 namedb = *pobject;
180 if (ret)
181 return ret;
182
183 rwlock_init(&namedb->lock);
184 INIT_LIST_HEAD(&namedb->list);
185 return 0;
186}
187
188int
189_nouveau_namedb_ctor(struct nouveau_object *parent,
190 struct nouveau_object *engine,
191 struct nouveau_oclass *oclass, void *data, u32 size,
192 struct nouveau_object **pobject)
193{
194 struct nouveau_namedb *object;
195 int ret;
196
197 ret = nouveau_namedb_create(parent, engine, oclass, 0, NULL, 0, &object);
198 *pobject = nv_object(object);
199 if (ret)
200 return ret;
201
202 return 0;
203}
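Note the locking contract above: the _get() helpers return with the namedb read lock still held whenever a lookup succeeds, and the matching nouveau_namedb_put() is what drops it, so a found handle stays valid between the two calls. A small pthread sketch of the same get-returns-locked contract (the table and names are hypothetical; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static int table[4] = { 10, 20, 30, 40 };

/* returns with the read lock held on success, like nouveau_namedb_get() */
static int *table_get(int idx)
{
	pthread_rwlock_rdlock(&lock);
	if (idx >= 0 && idx < 4)
		return &table[idx];
	pthread_rwlock_unlock(&lock);	/* lookup failed: drop the lock */
	return NULL;
}

static void table_put(int *entry)
{
	if (entry)
		pthread_rwlock_unlock(&lock);
}

int main(void)
{
	int *e = table_get(2);
	if (e)
		printf("entry = %d\n", *e);	/* safe: lock held */
	table_put(e);
	return 0;
}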
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
new file mode 100644
index 000000000000..0daab62ea14c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -0,0 +1,468 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/parent.h>
27#include <core/namedb.h>
28#include <core/handle.h>
29#include <core/engine.h>
30
31#ifdef NOUVEAU_OBJECT_MAGIC
32static struct list_head _objlist = LIST_HEAD_INIT(_objlist);
33static DEFINE_SPINLOCK(_objlist_lock);
34#endif
35
36int
37nouveau_object_create_(struct nouveau_object *parent,
38 struct nouveau_object *engine,
39 struct nouveau_oclass *oclass, u32 pclass,
40 int size, void **pobject)
41{
42 struct nouveau_object *object;
43
44 object = *pobject = kzalloc(size, GFP_KERNEL);
45 if (!object)
46 return -ENOMEM;
47
48 nouveau_object_ref(parent, &object->parent);
49 nouveau_object_ref(engine, &object->engine);
50 object->oclass = oclass;
51 object->oclass->handle |= pclass;
52 atomic_set(&object->refcount, 1);
53 atomic_set(&object->usecount, 0);
54
55#ifdef NOUVEAU_OBJECT_MAGIC
56 object->_magic = NOUVEAU_OBJECT_MAGIC;
57 spin_lock(&_objlist_lock);
58 list_add(&object->list, &_objlist);
59 spin_unlock(&_objlist_lock);
60#endif
61 return 0;
62}
63
64static int
65_nouveau_object_ctor(struct nouveau_object *parent,
66 struct nouveau_object *engine,
67 struct nouveau_oclass *oclass, void *data, u32 size,
68 struct nouveau_object **pobject)
69{
70 struct nouveau_object *object;
71 int ret;
72
73 ret = nouveau_object_create(parent, engine, oclass, 0, &object);
74 *pobject = nv_object(object);
75 if (ret)
76 return ret;
77
78 return 0;
79}
80
81void
82nouveau_object_destroy(struct nouveau_object *object)
83{
84#ifdef NOUVEAU_OBJECT_MAGIC
85 spin_lock(&_objlist_lock);
86 list_del(&object->list);
87 spin_unlock(&_objlist_lock);
88#endif
89 nouveau_object_ref(NULL, &object->engine);
90 nouveau_object_ref(NULL, &object->parent);
91 kfree(object);
92}
93
94static void
95_nouveau_object_dtor(struct nouveau_object *object)
96{
97 nouveau_object_destroy(object);
98}
99
100int
101nouveau_object_init(struct nouveau_object *object)
102{
103 return 0;
104}
105
106static int
107_nouveau_object_init(struct nouveau_object *object)
108{
109 return nouveau_object_init(object);
110}
111
112int
113nouveau_object_fini(struct nouveau_object *object, bool suspend)
114{
115 return 0;
116}
117
118static int
119_nouveau_object_fini(struct nouveau_object *object, bool suspend)
120{
121 return nouveau_object_fini(object, suspend);
122}
123
124struct nouveau_ofuncs
125nouveau_object_ofuncs = {
126 .ctor = _nouveau_object_ctor,
127 .dtor = _nouveau_object_dtor,
128 .init = _nouveau_object_init,
129 .fini = _nouveau_object_fini,
130};
131
132int
133nouveau_object_ctor(struct nouveau_object *parent,
134 struct nouveau_object *engine,
135 struct nouveau_oclass *oclass, void *data, u32 size,
136 struct nouveau_object **pobject)
137{
138 struct nouveau_ofuncs *ofuncs = oclass->ofuncs;
139 int ret;
140
141 *pobject = NULL;
142
143 ret = ofuncs->ctor(parent, engine, oclass, data, size, pobject);
144 if (ret < 0) {
145 if (ret != -ENODEV) {
146 nv_error(parent, "failed to create 0x%08x, %d\n",
147 oclass->handle, ret);
148 }
149
150 if (*pobject) {
151 ofuncs->dtor(*pobject);
152 *pobject = NULL;
153 }
154
155 return ret;
156 }
157
158 nv_debug(*pobject, "created\n");
159 return 0;
160}
161
162static void
163nouveau_object_dtor(struct nouveau_object *object)
164{
165 nv_debug(object, "destroying\n");
166 nv_ofuncs(object)->dtor(object);
167}
168
169void
170nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref)
171{
172 if (obj) {
173 atomic_inc(&obj->refcount);
174 nv_trace(obj, "inc() == %d\n", atomic_read(&obj->refcount));
175 }
176
177 if (*ref) {
178 int dead = atomic_dec_and_test(&(*ref)->refcount);
179 nv_trace(*ref, "dec() == %d\n", atomic_read(&(*ref)->refcount));
180 if (dead)
181 nouveau_object_dtor(*ref);
182 }
183
184 *ref = obj;
185}
186
187int
188nouveau_object_new(struct nouveau_object *client, u32 _parent, u32 _handle,
189 u16 _oclass, void *data, u32 size,
190 struct nouveau_object **pobject)
191{
192 struct nouveau_object *parent = NULL;
193 struct nouveau_object *engctx = NULL;
194 struct nouveau_object *object = NULL;
195 struct nouveau_object *engine;
196 struct nouveau_oclass *oclass;
197 struct nouveau_handle *handle;
198 int ret;
199
200 /* lookup parent object and ensure it *is* a parent */
201 parent = nouveau_handle_ref(client, _parent);
202 if (!parent) {
203 nv_error(client, "parent 0x%08x not found\n", _parent);
204 return -ENOENT;
205 }
206
207 if (!nv_iclass(parent, NV_PARENT_CLASS)) {
208 nv_error(parent, "cannot have children\n");
209 ret = -EINVAL;
210 goto fail_class;
211 }
212
213 /* check that parent supports the requested subclass */
214 ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
215 if (ret) {
216 nv_debug(parent, "illegal class 0x%04x\n", _oclass);
217 goto fail_class;
218 }
219
220 /* make sure engine init has been completed *before* any objects
221 * it controls are created - the constructors may depend on
222 * state calculated at init (ie. default context construction)
223 */
224 if (engine) {
225 ret = nouveau_object_inc(engine);
226 if (ret)
227 goto fail_class;
228 }
229
230 /* if engine requires it, create a context object to insert
231 * between the parent and its children (eg. PGRAPH context)
232 */
233 if (engine && nv_engine(engine)->cclass) {
234 ret = nouveau_object_ctor(parent, engine,
235 nv_engine(engine)->cclass,
236 data, size, &engctx);
237 if (ret)
238 goto fail_engctx;
239 } else {
240 nouveau_object_ref(parent, &engctx);
241 }
242
243 /* finally, create new object and bind it to its handle */
244 ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
245 *pobject = object;
246 if (ret)
247 goto fail_ctor;
248
249 ret = nouveau_object_inc(object);
250 if (ret)
251 goto fail_init;
252
253 ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
254 if (ret)
255 goto fail_handle;
256
257 ret = nouveau_handle_init(handle);
258 if (ret)
259 nouveau_handle_destroy(handle);
260
261fail_handle:
262 nouveau_object_dec(object, false);
263fail_init:
264 nouveau_object_ref(NULL, &object);
265fail_ctor:
266 nouveau_object_ref(NULL, &engctx);
267fail_engctx:
268 if (engine)
269 nouveau_object_dec(engine, false);
270fail_class:
271 nouveau_object_ref(NULL, &parent);
272 return ret;
273}
274
275int
276nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
277{
278 struct nouveau_object *parent = NULL;
279 struct nouveau_object *namedb = NULL;
280 struct nouveau_handle *handle = NULL;
281 int ret = -EINVAL;
282
283 parent = nouveau_handle_ref(client, _parent);
284 if (!parent)
285 return -ENOENT;
286
287 namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
288 if (namedb) {
289 handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
290 if (handle) {
291 nouveau_namedb_put(handle);
292 nouveau_handle_fini(handle, false);
293 nouveau_handle_destroy(handle);
294 }
295 }
296
297 nouveau_object_ref(NULL, &parent);
298 return ret;
299}
300
301int
302nouveau_object_inc(struct nouveau_object *object)
303{
304 int ref = atomic_add_return(1, &object->usecount);
305 int ret;
306
307 nv_trace(object, "use(+1) == %d\n", atomic_read(&object->usecount));
308 if (ref != 1)
309 return 0;
310
311 nv_trace(object, "initialising...\n");
312 if (object->parent) {
313 ret = nouveau_object_inc(object->parent);
314 if (ret) {
315 nv_error(object, "parent failed, %d\n", ret);
316 goto fail_parent;
317 }
318 }
319
320 if (object->engine) {
321 mutex_lock(&nv_subdev(object->engine)->mutex);
322 ret = nouveau_object_inc(object->engine);
323 mutex_unlock(&nv_subdev(object->engine)->mutex);
324 if (ret) {
325 nv_error(object, "engine failed, %d\n", ret);
326 goto fail_engine;
327 }
328 }
329
330 ret = nv_ofuncs(object)->init(object);
331 if (ret) {
332 nv_error(object, "init failed, %d\n", ret);
333 goto fail_self;
334 }
335
336 nv_debug(object, "initialised\n");
337 return 0;
338
339fail_self:
340 if (object->engine) {
341 mutex_lock(&nv_subdev(object->engine)->mutex);
342 nouveau_object_dec(object->engine, false);
343 mutex_unlock(&nv_subdev(object->engine)->mutex);
344 }
345fail_engine:
346 if (object->parent)
347 nouveau_object_dec(object->parent, false);
348fail_parent:
349 atomic_dec(&object->usecount);
350 return ret;
351}
352
353static int
354nouveau_object_decf(struct nouveau_object *object)
355{
356 int ret;
357
358 nv_trace(object, "stopping...\n");
359
360 ret = nv_ofuncs(object)->fini(object, false);
361 if (ret)
362 nv_warn(object, "failed fini, %d\n", ret);
363
364 if (object->engine) {
365 mutex_lock(&nv_subdev(object->engine)->mutex);
366 nouveau_object_dec(object->engine, false);
367 mutex_unlock(&nv_subdev(object->engine)->mutex);
368 }
369
370 if (object->parent)
371 nouveau_object_dec(object->parent, false);
372
373 nv_debug(object, "stopped\n");
374 return 0;
375}
376
377static int
378nouveau_object_decs(struct nouveau_object *object)
379{
380 int ret, rret;
381
382 nv_trace(object, "suspending...\n");
383
384 ret = nv_ofuncs(object)->fini(object, true);
385 if (ret) {
386 nv_error(object, "failed suspend, %d\n", ret);
387 return ret;
388 }
389
390 if (object->engine) {
391 mutex_lock(&nv_subdev(object->engine)->mutex);
392 ret = nouveau_object_dec(object->engine, true);
393 mutex_unlock(&nv_subdev(object->engine)->mutex);
394 if (ret) {
395 nv_warn(object, "engine failed suspend, %d\n", ret);
396 goto fail_engine;
397 }
398 }
399
400 if (object->parent) {
401 ret = nouveau_object_dec(object->parent, true);
402 if (ret) {
403 nv_warn(object, "parent failed suspend, %d\n", ret);
404 goto fail_parent;
405 }
406 }
407
408 nv_debug(object, "suspended\n");
409 return 0;
410
411fail_parent:
412 if (object->engine) {
413 mutex_lock(&nv_subdev(object->engine)->mutex);
414 rret = nouveau_object_inc(object->engine);
415 mutex_unlock(&nv_subdev(object->engine)->mutex);
416 if (rret)
417 nv_fatal(object, "engine failed to reinit, %d\n", rret);
418 }
419
420fail_engine:
421 rret = nv_ofuncs(object)->init(object);
422 if (rret)
423 nv_fatal(object, "failed to reinit, %d\n", rret);
424
425 return ret;
426}
427
428int
429nouveau_object_dec(struct nouveau_object *object, bool suspend)
430{
431 int ref = atomic_add_return(-1, &object->usecount);
432 int ret;
433
434 nv_trace(object, "use(-1) == %d\n", atomic_read(&object->usecount));
435
436 if (ref == 0) {
437 if (suspend)
438 ret = nouveau_object_decs(object);
439 else
440 ret = nouveau_object_decf(object);
441
442 if (ret) {
443 atomic_inc(&object->usecount);
444 return ret;
445 }
446 }
447
448 return 0;
449}
450
451void
452nouveau_object_debug(void)
453{
454#ifdef NOUVEAU_OBJECT_MAGIC
455 struct nouveau_object *object;
456 if (!list_empty(&_objlist)) {
457 nv_fatal(NULL, "*******************************************\n");
458 nv_fatal(NULL, "* AIIIII! object(s) still exist!!!\n");
459 nv_fatal(NULL, "*******************************************\n");
460 list_for_each_entry(object, &_objlist, list) {
461 nv_fatal(object, "%p/%p/%d/%d\n",
462 object->parent, object->engine,
463 atomic_read(&object->refcount),
464 atomic_read(&object->usecount));
465 }
466 }
467#endif
468}
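Objects here carry two counters: refcount governs lifetime (the destructor runs on the final ref drop in nouveau_object_ref()), while usecount governs hardware state (init on the 0 to 1 transition in nouveau_object_inc(), fini on 1 to 0 in nouveau_object_dec(), with suspend layered on fini). A toy model of the usecount half, assuming nothing beyond what the code above shows:

#include <stdio.h>

static int usecount;

static int obj_inc(void)
{
	if (++usecount == 1)
		printf("init: first user brings the object up\n");
	return 0;
}

static void obj_dec(void)
{
	if (--usecount == 0)
		printf("fini: last user tears the object down\n");
}

int main(void)
{
	obj_inc();	/* init runs */
	obj_inc();	/* already up, counter only */
	obj_dec();	/* still in use */
	obj_dec();	/* fini runs */
	return 0;
}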
diff --git a/drivers/gpu/drm/nouveau/core/core/option.c b/drivers/gpu/drm/nouveau/core/core/option.c
new file mode 100644
index 000000000000..62a432ea39e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/option.c
@@ -0,0 +1,131 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/option.h>
26#include <core/debug.h>
27
28/* compares unterminated string 'str' with zero-terminated string 'cmp' */
29static inline int
30strncasecmpz(const char *str, const char *cmp, size_t len)
31{
32 if (strlen(cmp) != len)
33 return len;
34 return strncasecmp(str, cmp, len);
35}
36
37const char *
38nouveau_stropt(const char *optstr, const char *opt, int *arglen)
39{
40 while (optstr && *optstr != '\0') {
41 int len = strcspn(optstr, ",=");
42 switch (optstr[len]) {
43 case '=':
44 if (!strncasecmpz(optstr, opt, len)) {
45 optstr += len + 1;
46 *arglen = strcspn(optstr, ",=");
47 return *arglen ? optstr : NULL;
48 }
49 optstr++;
50 break;
51 case ',':
52 optstr++;
53 break;
54 default:
55 break;
56 }
57 optstr += len;
58 }
59
60 return NULL;
61}
62
63bool
64nouveau_boolopt(const char *optstr, const char *opt, bool value)
65{
66 int arglen;
67
68 optstr = nouveau_stropt(optstr, opt, &arglen);
69 if (optstr) {
70 if (!strncasecmpz(optstr, "0", arglen) ||
71 !strncasecmpz(optstr, "no", arglen) ||
72 !strncasecmpz(optstr, "off", arglen) ||
73 !strncasecmpz(optstr, "false", arglen))
74 value = false;
75 else
76 if (!strncasecmpz(optstr, "1", arglen) ||
77 !strncasecmpz(optstr, "yes", arglen) ||
78 !strncasecmpz(optstr, "on", arglen) ||
79 !strncasecmpz(optstr, "true", arglen))
80 value = true;
81 }
82
83 return value;
84}
85
86int
87nouveau_dbgopt(const char *optstr, const char *sub)
88{
89 int mode = 1, level = CONFIG_NOUVEAU_DEBUG_DEFAULT;
90
91 while (optstr) {
92 int len = strcspn(optstr, ",=");
93 switch (optstr[len]) {
94 case '=':
95 if (strncasecmpz(optstr, sub, len))
96 mode = 0;
97 optstr++;
98 break;
99 default:
100 if (mode) {
101 if (!strncasecmpz(optstr, "fatal", len))
102 level = NV_DBG_FATAL;
103 else if (!strncasecmpz(optstr, "error", len))
104 level = NV_DBG_ERROR;
105 else if (!strncasecmpz(optstr, "warn", len))
106 level = NV_DBG_WARN;
107 else if (!strncasecmpz(optstr, "info", len))
108 level = NV_DBG_INFO;
109 else if (!strncasecmpz(optstr, "debug", len))
110 level = NV_DBG_DEBUG;
111 else if (!strncasecmpz(optstr, "trace", len))
112 level = NV_DBG_TRACE;
113 else if (!strncasecmpz(optstr, "paranoia", len))
114 level = NV_DBG_PARANOIA;
115 else if (!strncasecmpz(optstr, "spam", len))
116 level = NV_DBG_SPAM;
117 }
118
119 if (optstr[len] != '\0') {
120 optstr++;
121 mode = 1;
122 break;
123 }
124
125 return level;
126 }
127 optstr += len;
128 }
129
130 return level;
131}
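The config and debug strings parsed here are comma-separated key=value lists. A standalone re-implementation of strncasecmpz() plus the boolean lookup, exercised on a sample string (the key names are only examples):

#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <stdbool.h>

/* compares unterminated string 'str' with zero-terminated string 'cmp' */
static int strncasecmpz(const char *str, const char *cmp, size_t len)
{
	if (strlen(cmp) != len)
		return (int)len;
	return strncasecmp(str, cmp, len);
}

static bool boolopt(const char *optstr, const char *opt, bool value)
{
	while (optstr && *optstr) {
		size_t len = strcspn(optstr, ",=");
		if (optstr[len] == '=' && !strncasecmpz(optstr, opt, len)) {
			const char *arg = optstr + len + 1;
			size_t arglen = strcspn(arg, ",");
			if (!strncasecmpz(arg, "0", arglen) ||
			    !strncasecmpz(arg, "no", arglen) ||
			    !strncasecmpz(arg, "off", arglen) ||
			    !strncasecmpz(arg, "false", arglen))
				value = false;
			else
			if (!strncasecmpz(arg, "1", arglen) ||
			    !strncasecmpz(arg, "yes", arglen) ||
			    !strncasecmpz(arg, "on", arglen) ||
			    !strncasecmpz(arg, "true", arglen))
				value = true;
			return value;
		}
		optstr += len;		/* skip token and its delimiter */
		if (*optstr)
			optstr++;
	}
	return value;
}

int main(void)
{
	const char *cfg = "NvBios=PRAMIN,PGRAPH=0,NvMSI=yes";
	printf("PGRAPH enabled: %d\n", boolopt(cfg, "PGRAPH", true));	/* 0 */
	printf("NvMSI enabled:  %d\n", boolopt(cfg, "NvMSI", false));	/* 1 */
	return 0;
}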
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
new file mode 100644
index 000000000000..a1ea034611d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -0,0 +1,139 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/parent.h>
27
28int
29nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
30 struct nouveau_object **pengine,
31 struct nouveau_oclass **poclass)
32{
33 struct nouveau_sclass *sclass;
34 struct nouveau_engine *engine;
35 struct nouveau_oclass *oclass;
36 u64 mask;
37
38 sclass = nv_parent(parent)->sclass;
39 while (sclass) {
40 if ((sclass->oclass->handle & 0xffff) == handle) {
41 *pengine = parent->engine;
42 *poclass = sclass->oclass;
43 return 0;
44 }
45
46 sclass = sclass->sclass;
47 }
48
49 mask = nv_parent(parent)->engine;
50 while (mask) {
51 int i = ffsll(mask) - 1;
52
53 if ((engine = nouveau_engine(parent, i))) {
54 oclass = engine->sclass;
55 while (oclass->ofuncs) {
56 if ((oclass->handle & 0xffff) == handle) {
57 *pengine = nv_object(engine);
58 *poclass = oclass;
59 return 0;
60 }
61 oclass++;
62 }
63 }
64
65 mask &= ~(1ULL << i);
66 }
67
68 return -EINVAL;
69}
70
71int
72nouveau_parent_create_(struct nouveau_object *parent,
73 struct nouveau_object *engine,
74 struct nouveau_oclass *oclass, u32 pclass,
75 struct nouveau_oclass *sclass, u64 engcls,
76 int size, void **pobject)
77{
78 struct nouveau_parent *object;
79 struct nouveau_sclass *nclass;
80 int ret;
81
82 ret = nouveau_object_create_(parent, engine, oclass, pclass |
83 NV_PARENT_CLASS, size, pobject);
84 object = *pobject;
85 if (ret)
86 return ret;
87
88 while (sclass && sclass->ofuncs) {
89 nclass = kzalloc(sizeof(*nclass), GFP_KERNEL);
90 if (!nclass)
91 return -ENOMEM;
92
93 nclass->sclass = object->sclass;
94 object->sclass = nclass;
95 nclass->engine = engine ? nv_engine(engine) : NULL;
96 nclass->oclass = sclass;
97 sclass++;
98 }
99
100 object->engine = engcls;
101 return 0;
102}
103
104int
105_nouveau_parent_ctor(struct nouveau_object *parent,
106 struct nouveau_object *engine,
107 struct nouveau_oclass *oclass, void *data, u32 size,
108 struct nouveau_object **pobject)
109{
110 struct nouveau_parent *object;
111 int ret;
112
113 ret = nouveau_parent_create(parent, engine, oclass, 0, NULL, 0, &object);
114 *pobject = nv_object(object);
115 if (ret)
116 return ret;
117
118 return 0;
119}
120
121void
122nouveau_parent_destroy(struct nouveau_parent *parent)
123{
124 struct nouveau_sclass *sclass;
125
126 while ((sclass = parent->sclass)) {
127 parent->sclass = sclass->sclass;
128 kfree(sclass);
129 }
130
131 nouveau_object_destroy(&parent->base);
132}
133
134
135void
136_nouveau_parent_dtor(struct nouveau_object *object)
137{
138 nouveau_parent_destroy(nv_parent(object));
139}
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
new file mode 100644
index 000000000000..6161eaf5447c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -0,0 +1,74 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/client.h>
27#include <core/subdev.h>
28#include <core/printk.h>
29
30void
31nv_printk_(struct nouveau_object *object, const char *pfx, int level,
32 const char *fmt, ...)
33{
34 static const char name[] = { '!', 'E', 'W', ' ', 'D', 'T', 'P', 'S' };
35 char mfmt[256];
36 va_list args;
37
38 if (object && !nv_iclass(object, NV_CLIENT_CLASS)) {
39 struct nouveau_object *device = object;
40 struct nouveau_object *subdev = object;
41 char obuf[64], *ofmt = "";
42
43 if (object->engine) {
44 snprintf(obuf, sizeof(obuf), "[0x%08x][%p]",
45 nv_hclass(object), object);
46 ofmt = obuf;
47 subdev = object->engine;
48 device = object->engine;
49 }
50
51 if (subdev->parent)
52 device = subdev->parent;
53
54 if (level > nv_subdev(subdev)->debug)
55 return;
56
57 snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s][%s]%s %s", pfx,
58 name[level], nv_subdev(subdev)->name,
59 nv_device(device)->name, ofmt, fmt);
60 } else
61 if (object && nv_iclass(object, NV_CLIENT_CLASS)) {
62 if (level > nv_client(object)->debug)
63 return;
64
65 snprintf(mfmt, sizeof(mfmt), "%snouveau %c[%8s] %s", pfx,
66 name[level], nv_client(object)->name, fmt);
67 } else {
68 snprintf(mfmt, sizeof(mfmt), "%snouveau: %s", pfx, fmt);
69 }
70
71 va_start(args, fmt);
72 vprintk(mfmt, args);
73 va_end(args);
74}
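nv_printk_() above builds a new format string around the caller's and only then formats the arguments, so every message gains a level/subdev/device prefix without touching the call sites. The same two-stage pattern in plain userspace C (the prefix content is simplified):

#include <stdio.h>
#include <stdarg.h>

static void log_printf(const char *subdev, char level, const char *fmt, ...)
{
	char mfmt[256];
	va_list args;

	/* stage 1: wrap the caller's format string in a fixed prefix */
	snprintf(mfmt, sizeof(mfmt), "nouveau %c[%8s] %s", level, subdev, fmt);

	/* stage 2: format the caller's arguments against the wrapped format */
	va_start(args, fmt);
	vprintf(mfmt, args);
	va_end(args);
}

int main(void)
{
	log_printf("PFIFO", 'E', "channel %d stalled\n", 3);
	return 0;
}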
diff --git a/drivers/gpu/drm/nouveau/core/core/ramht.c b/drivers/gpu/drm/nouveau/core/core/ramht.c
new file mode 100644
index 000000000000..86a64045dd60
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/ramht.c
@@ -0,0 +1,109 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <core/object.h>
24#include <core/ramht.h>
25#include <core/math.h>
26
27#include <subdev/bar.h>
28
29static u32
30nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle)
31{
32 u32 hash = 0;
33
34 while (handle) {
35 hash ^= (handle & ((1 << ramht->bits) - 1));
36 handle >>= ramht->bits;
37 }
38
39 hash ^= chid << (ramht->bits - 4);
40 hash = hash << 3;
41 return hash;
42}
43
44int
45nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid,
46 u32 handle, u32 context)
47{
48 struct nouveau_bar *bar = nouveau_bar(ramht);
49 u32 co, ho;
50
51 co = ho = nouveau_ramht_hash(ramht, chid, handle);
52 do {
53 if (!nv_ro32(ramht, co + 4)) {
54 nv_wo32(ramht, co + 0, handle);
55 nv_wo32(ramht, co + 4, context);
56 if (bar)
57 bar->flush(bar);
58 return co;
59 }
60
61 co += 8;
62 if (co >= nv_gpuobj(ramht)->size)
63 co = 0;
64 } while (co != ho);
65
66 return -ENOMEM;
67}
68
69void
70nouveau_ramht_remove(struct nouveau_ramht *ramht, int cookie)
71{
72 struct nouveau_bar *bar = nouveau_bar(ramht);
73 nv_wo32(ramht, cookie + 0, 0x00000000);
74 nv_wo32(ramht, cookie + 4, 0x00000000);
75 if (bar)
76 bar->flush(bar);
77}
78
79static struct nouveau_oclass
80nouveau_ramht_oclass = {
81 .handle = 0x0000abcd,
82 .ofuncs = &(struct nouveau_ofuncs) {
83 .ctor = NULL,
84 .dtor = _nouveau_gpuobj_dtor,
85 .init = _nouveau_gpuobj_init,
86 .fini = _nouveau_gpuobj_fini,
87 .rd32 = _nouveau_gpuobj_rd32,
88 .wr32 = _nouveau_gpuobj_wr32,
89 },
90};
91
92int
93nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
94 u32 size, u32 align, struct nouveau_ramht **pramht)
95{
96 struct nouveau_ramht *ramht;
97 int ret;
98
99 ret = nouveau_gpuobj_create(parent, parent->engine ?
100 parent->engine : parent, /* <nv50 ramht */
101 &nouveau_ramht_oclass, 0, pargpu, size,
102 align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
103 *pramht = ramht;
104 if (ret)
105 return ret;
106
107 ramht->bits = log2i(nv_gpuobj(ramht)->size >> 3);
108 return 0;
109}
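The RAMHT hash folds the object handle down to the table's index width by repeated XOR, mixes in the channel id, and scales by the 8-byte entry size; collisions fall through to the linear probe in nouveau_ramht_insert(). The hash reproduced standalone, with sample inputs (a 4KiB table holds 512 8-byte entries, hence bits = 9):

#include <stdio.h>
#include <stdint.h>

static uint32_t ramht_hash(int bits, int chid, uint32_t handle)
{
	uint32_t hash = 0;

	while (handle) {
		hash ^= handle & ((1u << bits) - 1);	/* fold to index width */
		handle >>= bits;
	}

	hash ^= (uint32_t)chid << (bits - 4);		/* mix in the channel */
	return hash << 3;				/* byte offset of an 8-byte entry */
}

int main(void)
{
	printf("0x%08x\n", (unsigned)ramht_hash(9, 2, 0xbeef0201));
	return 0;
}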
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
new file mode 100644
index 000000000000..f74c30aa33a0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/subdev.h>
27#include <core/device.h>
28#include <core/option.h>
29
30void
31nouveau_subdev_reset(struct nouveau_object *subdev)
32{
33 nv_trace(subdev, "resetting...\n");
34 nv_ofuncs(subdev)->fini(subdev, false);
35 nv_debug(subdev, "reset\n");
36}
37
38int
39nouveau_subdev_init(struct nouveau_subdev *subdev)
40{
41 int ret = nouveau_object_init(&subdev->base);
42 if (ret)
43 return ret;
44
45 nouveau_subdev_reset(&subdev->base);
46 return 0;
47}
48
49int
50_nouveau_subdev_init(struct nouveau_object *object)
51{
52 return nouveau_subdev_init(nv_subdev(object));
53}
54
55int
56nouveau_subdev_fini(struct nouveau_subdev *subdev, bool suspend)
57{
58 if (subdev->unit) {
59 nv_mask(subdev, 0x000200, subdev->unit, 0x00000000);
60 nv_mask(subdev, 0x000200, subdev->unit, subdev->unit);
61 }
62
63 return nouveau_object_fini(&subdev->base, suspend);
64}
65
66int
67_nouveau_subdev_fini(struct nouveau_object *object, bool suspend)
68{
69 return nouveau_subdev_fini(nv_subdev(object), suspend);
70}
71
72void
73nouveau_subdev_destroy(struct nouveau_subdev *subdev)
74{
75 int subidx = nv_hclass(subdev) & 0xff;
76 nv_device(subdev)->subdev[subidx] = NULL;
77 nouveau_object_destroy(&subdev->base);
78}
79
80void
81_nouveau_subdev_dtor(struct nouveau_object *object)
82{
83 nouveau_subdev_destroy(nv_subdev(object));
84}
85
86int
87nouveau_subdev_create_(struct nouveau_object *parent,
88 struct nouveau_object *engine,
89 struct nouveau_oclass *oclass, u32 pclass,
90 const char *subname, const char *sysname,
91 int size, void **pobject)
92{
93 struct nouveau_subdev *subdev;
94 int ret;
95
96 ret = nouveau_object_create_(parent, engine, oclass, pclass |
97 NV_SUBDEV_CLASS, size, pobject);
98 subdev = *pobject;
99 if (ret)
100 return ret;
101
102 mutex_init(&subdev->mutex);
103 subdev->name = subname;
104
105 if (parent) {
106 struct nouveau_device *device = nv_device(parent);
107 int subidx = nv_hclass(subdev) & 0xff;
108
109 subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
110 subdev->mmio = nv_subdev(device)->mmio;
111 device->subdev[subidx] = *pobject;
112 }
113
114 return 0;
115}
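
Every constructor in this file, and in the engine files that follow, stores the partially-built object through *pobject before checking the return code, so a failing ctor still hands the caller something its dtor can tear down. A minimal userspace sketch of that idiom (all names here are hypothetical, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct object { int ready; };

static int
object_create(int fail, struct object **pobject)
{
	struct object *obj = calloc(1, sizeof(*obj));
	*pobject = obj;                 /* published even on failure */
	if (!obj || fail)
		return -1;
	obj->ready = 1;
	return 0;
}

int
main(void)
{
	struct object *obj = NULL;
	int ret = object_create(0, &obj);
	if (ret == 0)
		printf("ready=%d\n", obj->ready);
	free(obj);                      /* caller tears down even a partial object */
	return ret ? 1 : 0;
}
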
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
new file mode 100644
index 000000000000..66f7dfd907ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -0,0 +1,175 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/bsp.h>
30
31struct nv84_bsp_priv {
32 struct nouveau_bsp base;
33};
34
35struct nv84_bsp_chan {
36 struct nouveau_bsp_chan base;
37};
38
39/*******************************************************************************
40 * BSP object classes
41 ******************************************************************************/
42
43static struct nouveau_oclass
44nv84_bsp_sclass[] = {
45 {},
46};
47
48/*******************************************************************************
49 * BSP context
50 ******************************************************************************/
51
52static int
53nv84_bsp_context_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct nv84_bsp_chan *priv;
59 int ret;
60
61 ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
62 0, 0, 0, &priv);
63 *pobject = nv_object(priv);
64 if (ret)
65 return ret;
66
67 return 0;
68}
69
70static void
71nv84_bsp_context_dtor(struct nouveau_object *object)
72{
73 struct nv84_bsp_chan *priv = (void *)object;
74 nouveau_bsp_context_destroy(&priv->base);
75}
76
77static int
78nv84_bsp_context_init(struct nouveau_object *object)
79{
80 struct nv84_bsp_chan *priv = (void *)object;
81 int ret;
82
83 ret = nouveau_bsp_context_init(&priv->base);
84 if (ret)
85 return ret;
86
87 return 0;
88}
89
90static int
91nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
92{
93 struct nv84_bsp_chan *priv = (void *)object;
94 return nouveau_bsp_context_fini(&priv->base, suspend);
95}
96
97static struct nouveau_oclass
98nv84_bsp_cclass = {
99 .handle = NV_ENGCTX(BSP, 0x84),
100 .ofuncs = &(struct nouveau_ofuncs) {
101 .ctor = nv84_bsp_context_ctor,
102 .dtor = nv84_bsp_context_dtor,
103 .init = nv84_bsp_context_init,
104 .fini = nv84_bsp_context_fini,
105 .rd32 = _nouveau_bsp_context_rd32,
106 .wr32 = _nouveau_bsp_context_wr32,
107 },
108};
109
110/*******************************************************************************
111 * BSP engine/subdev functions
112 ******************************************************************************/
113
114static void
115nv84_bsp_intr(struct nouveau_subdev *subdev)
116{
117}
118
119static int
120nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
121 struct nouveau_oclass *oclass, void *data, u32 size,
122 struct nouveau_object **pobject)
123{
124 struct nv84_bsp_priv *priv;
125 int ret;
126
127 ret = nouveau_bsp_create(parent, engine, oclass, &priv);
128 *pobject = nv_object(priv);
129 if (ret)
130 return ret;
131
132 nv_subdev(priv)->unit = 0x04008000;
133 nv_subdev(priv)->intr = nv84_bsp_intr;
134 nv_engine(priv)->cclass = &nv84_bsp_cclass;
135 nv_engine(priv)->sclass = nv84_bsp_sclass;
136 return 0;
137}
138
139static void
140nv84_bsp_dtor(struct nouveau_object *object)
141{
142 struct nv84_bsp_priv *priv = (void *)object;
143 nouveau_bsp_destroy(&priv->base);
144}
145
146static int
147nv84_bsp_init(struct nouveau_object *object)
148{
149 struct nv84_bsp_priv *priv = (void *)object;
150 int ret;
151
152 ret = nouveau_bsp_init(&priv->base);
153 if (ret)
154 return ret;
155
156 return 0;
157}
158
159static int
160nv84_bsp_fini(struct nouveau_object *object, bool suspend)
161{
162 struct nv84_bsp_priv *priv = (void *)object;
163 return nouveau_bsp_fini(&priv->base, suspend);
164}
165
166struct nouveau_oclass
167nv84_bsp_oclass = {
168 .handle = NV_ENGINE(BSP, 0x84),
169 .ofuncs = &(struct nouveau_ofuncs) {
170 .ctor = nv84_bsp_ctor,
171 .dtor = nv84_bsp_dtor,
172 .init = nv84_bsp_init,
173 .fini = nv84_bsp_fini,
174 },
175};
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc
index 219850d53286..219850d53286 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h
index 37d6de3c9d61..c92520f3ed46 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nva3.fuc.h
@@ -1,4 +1,4 @@
-u32 nva3_pcopy_data[] = {
+static u32 nva3_pcopy_data[] = {
 /* 0x0000: ctx_object */
 	0x00000000,
 /* 0x0004: ctx_dma */
@@ -183,7 +183,7 @@ u32 nva3_pcopy_data[] = {
 	0x00000800,
 };
 
-u32 nva3_pcopy_code[] = {
+static u32 nva3_pcopy_code[] = {
 /* 0x0000: main */
 	0x04fe04bd,
 	0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h
index cd879f31bb38..0d98c6c0958d 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/fuc/nvc0.fuc.h
@@ -1,4 +1,4 @@
-u32 nvc0_pcopy_data[] = {
+static u32 nvc0_pcopy_data[] = {
 /* 0x0000: ctx_object */
 	0x00000000,
 /* 0x0004: ctx_query_address_high */
@@ -171,7 +171,7 @@ u32 nvc0_pcopy_data[] = {
 	0x00000800,
 };
 
-u32 nvc0_pcopy_code[] = {
+static u32 nvc0_pcopy_code[] = {
 /* 0x0000: main */
 	0x04fe04bd,
 	0x3517f000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
new file mode 100644
index 000000000000..4df6da0af740
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -0,0 +1,222 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/enum.h>
27#include <core/class.h>
28#include <core/engctx.h>
29
30#include <subdev/fb.h>
31#include <subdev/vm.h>
32
33#include <engine/fifo.h>
34#include <engine/copy.h>
35
36#include "fuc/nva3.fuc.h"
37
38struct nva3_copy_priv {
39 struct nouveau_copy base;
40};
41
42struct nva3_copy_chan {
43 struct nouveau_copy_chan base;
44};
45
46/*******************************************************************************
47 * Copy object classes
48 ******************************************************************************/
49
50static struct nouveau_oclass
51nva3_copy_sclass[] = {
52 { 0x85b5, &nouveau_object_ofuncs },
53 {}
54};
55
56/*******************************************************************************
57 * PCOPY context
58 ******************************************************************************/
59
60static int
61nva3_copy_context_ctor(struct nouveau_object *parent,
62 struct nouveau_object *engine,
63 struct nouveau_oclass *oclass, void *data, u32 size,
64 struct nouveau_object **pobject)
65{
66 struct nva3_copy_chan *priv;
67 int ret;
68
69 ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
70 NVOBJ_FLAG_ZERO_ALLOC, &priv);
71 *pobject = nv_object(priv);
72 if (ret)
73 return ret;
74
75 return 0;
76}
77
78static struct nouveau_oclass
79nva3_copy_cclass = {
80 .handle = NV_ENGCTX(COPY0, 0xa3),
81 .ofuncs = &(struct nouveau_ofuncs) {
82 .ctor = nva3_copy_context_ctor,
83 .dtor = _nouveau_copy_context_dtor,
84 .init = _nouveau_copy_context_init,
85 .fini = _nouveau_copy_context_fini,
86 .rd32 = _nouveau_copy_context_rd32,
87 .wr32 = _nouveau_copy_context_wr32,
88
89 },
90};
91
92/*******************************************************************************
93 * PCOPY engine/subdev functions
94 ******************************************************************************/
95
96static const struct nouveau_enum nva3_copy_isr_error_name[] = {
97 { 0x0001, "ILLEGAL_MTHD" },
98 { 0x0002, "INVALID_ENUM" },
99 { 0x0003, "INVALID_BITFIELD" },
100 {}
101};
102
103static void
104nva3_copy_intr(struct nouveau_subdev *subdev)
105{
106 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
107 struct nouveau_engine *engine = nv_engine(subdev);
108 struct nouveau_object *engctx;
109 struct nva3_copy_priv *priv = (void *)subdev;
110 u32 dispatch = nv_rd32(priv, 0x10401c);
111 u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
112 u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
113 u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
114 u32 addr = nv_rd32(priv, 0x104040) >> 16;
115 u32 mthd = (addr & 0x07ff) << 2;
116 u32 subc = (addr & 0x3800) >> 11;
117 u32 data = nv_rd32(priv, 0x104044);
118 int chid;
119
120 engctx = nouveau_engctx_get(engine, inst);
121 chid = pfifo->chid(pfifo, engctx);
122
123 if (stat & 0x00000040) {
124 nv_error(priv, "DISPATCH_ERROR [");
125 nouveau_enum_print(nva3_copy_isr_error_name, ssta);
126 printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
127 chid, inst << 12, subc, mthd, data);
128 nv_wr32(priv, 0x104004, 0x00000040);
129 stat &= ~0x00000040;
130 }
131
132 if (stat) {
133 nv_error(priv, "unhandled intr 0x%08x\n", stat);
134 nv_wr32(priv, 0x104004, stat);
135 }
136
137 nv50_fb_trap(nouveau_fb(priv), 1);
138 nouveau_engctx_put(engctx);
139}
140
141static int
142nva3_copy_tlb_flush(struct nouveau_engine *engine)
143{
144 nv50_vm_flush_engine(&engine->base, 0x0d);
145 return 0;
146}
147
148static int
149nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
150 struct nouveau_oclass *oclass, void *data, u32 size,
151 struct nouveau_object **pobject)
152{
153 bool enable = (nv_device(parent)->chipset != 0xaf);
154 struct nva3_copy_priv *priv;
155 int ret;
156
157 ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
158 *pobject = nv_object(priv);
159 if (ret)
160 return ret;
161
162 nv_subdev(priv)->unit = 0x00802000;
163 nv_subdev(priv)->intr = nva3_copy_intr;
164 nv_engine(priv)->cclass = &nva3_copy_cclass;
165 nv_engine(priv)->sclass = nva3_copy_sclass;
166 nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
167 return 0;
168}
169
170static int
171nva3_copy_init(struct nouveau_object *object)
172{
173 struct nva3_copy_priv *priv = (void *)object;
174 int ret, i;
175
176 ret = nouveau_copy_init(&priv->base);
177 if (ret)
178 return ret;
179
180 /* disable all interrupts */
181 nv_wr32(priv, 0x104014, 0xffffffff);
182
183 /* upload ucode */
184 nv_wr32(priv, 0x1041c0, 0x01000000);
185 for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
186 nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
187
188 nv_wr32(priv, 0x104180, 0x01000000);
189 for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
190 if ((i & 0x3f) == 0)
191 nv_wr32(priv, 0x104188, i >> 6);
192 nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
193 }
194
195 /* start it running */
196 nv_wr32(priv, 0x10410c, 0x00000000);
197 nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
198 nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
199 return 0;
200}
201
202static int
203nva3_copy_fini(struct nouveau_object *object, bool suspend)
204{
205 struct nva3_copy_priv *priv = (void *)object;
206
207 nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
208 nv_wr32(priv, 0x104014, 0xffffffff);
209
210 return nouveau_copy_fini(&priv->base, suspend);
211}
212
213struct nouveau_oclass
214nva3_copy_oclass = {
215 .handle = NV_ENGINE(COPY0, 0xa3),
216 .ofuncs = &(struct nouveau_ofuncs) {
217 .ctor = nva3_copy_ctor,
218 .dtor = _nouveau_copy_dtor,
219 .init = nva3_copy_init,
220 .fini = nva3_copy_fini,
221 },
222};
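
The upload loop in nva3_copy_init() feeds the falcon's code segment in 64-word (256-byte) pages, selecting the next page index (i >> 6) at the start of each 64-word burst. A standalone sketch of that paging scheme, with reg_wr() as a hypothetical stand-in for nv_wr32():

#include <stdio.h>

static void
reg_wr(unsigned reg, unsigned val)
{
	printf("wr 0x%06x <- 0x%08x\n", reg, val);
}

static void
upload_code(const unsigned *code, unsigned words)
{
	unsigned i;
	reg_wr(0x104180, 0x01000000);           /* enable code upload */
	for (i = 0; i < words; i++) {
		if ((i & 0x3f) == 0)
			reg_wr(0x104188, i >> 6);  /* select 256-byte page */
		reg_wr(0x104184, code[i]);
	}
}

int
main(void)
{
	static const unsigned code[130] = { 0x04fe04bd, 0x3517f000 };
	upload_code(code, sizeof(code) / sizeof(code[0]));
	return 0;
}
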
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
new file mode 100644
index 000000000000..06d4a8791055
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -0,0 +1,265 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/enum.h>
27#include <core/class.h>
28#include <core/engctx.h>
29
30#include <engine/fifo.h>
31#include <engine/copy.h>
32
33#include "fuc/nvc0.fuc.h"
34
35struct nvc0_copy_priv {
36 struct nouveau_copy base;
37};
38
39struct nvc0_copy_chan {
40 struct nouveau_copy_chan base;
41};
42
43/*******************************************************************************
44 * Copy object classes
45 ******************************************************************************/
46
47static struct nouveau_oclass
48nvc0_copy0_sclass[] = {
49 { 0x90b5, &nouveau_object_ofuncs },
50 {},
51};
52
53static struct nouveau_oclass
54nvc0_copy1_sclass[] = {
55 { 0x90b8, &nouveau_object_ofuncs },
56 {},
57};
58
59/*******************************************************************************
60 * PCOPY context
61 ******************************************************************************/
62
63static int
64nvc0_copy_context_ctor(struct nouveau_object *parent,
65 struct nouveau_object *engine,
66 struct nouveau_oclass *oclass, void *data, u32 size,
67 struct nouveau_object **pobject)
68{
69 struct nvc0_copy_chan *priv;
70 int ret;
71
72 ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
73 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
74 *pobject = nv_object(priv);
75 if (ret)
76 return ret;
77
78 return 0;
79}
80
81static struct nouveau_ofuncs
82nvc0_copy_context_ofuncs = {
83 .ctor = nvc0_copy_context_ctor,
84 .dtor = _nouveau_copy_context_dtor,
85 .init = _nouveau_copy_context_init,
86 .fini = _nouveau_copy_context_fini,
87 .rd32 = _nouveau_copy_context_rd32,
88 .wr32 = _nouveau_copy_context_wr32,
89};
90
91static struct nouveau_oclass
92nvc0_copy0_cclass = {
93 .handle = NV_ENGCTX(COPY0, 0xc0),
94 .ofuncs = &nvc0_copy_context_ofuncs,
95};
96
97static struct nouveau_oclass
98nvc0_copy1_cclass = {
99 .handle = NV_ENGCTX(COPY1, 0xc0),
100 .ofuncs = &nvc0_copy_context_ofuncs,
101};
102
103/*******************************************************************************
104 * PCOPY engine/subdev functions
105 ******************************************************************************/
106
107static const struct nouveau_enum nvc0_copy_isr_error_name[] = {
108 { 0x0001, "ILLEGAL_MTHD" },
109 { 0x0002, "INVALID_ENUM" },
110 { 0x0003, "INVALID_BITFIELD" },
111 {}
112};
113
114static void
115nvc0_copy_intr(struct nouveau_subdev *subdev)
116{
117 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
118 struct nouveau_engine *engine = nv_engine(subdev);
119 struct nouveau_object *engctx;
120 int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
121 struct nvc0_copy_priv *priv = (void *)subdev;
122 u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
123 u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
124 u32 stat = intr & disp & ~(disp >> 16);
125 u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
126 u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
127 u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
128 u32 mthd = (addr & 0x07ff) << 2;
129 u32 subc = (addr & 0x3800) >> 11;
130 u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
131 int chid;
132
133 engctx = nouveau_engctx_get(engine, inst);
134 chid = pfifo->chid(pfifo, engctx);
135
136 if (stat & 0x00000040) {
137 nv_error(priv, "DISPATCH_ERROR [");
138 nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
139 printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
140 chid, (u64)inst << 12, subc, mthd, data);
141 nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
142 stat &= ~0x00000040;
143 }
144
145 if (stat) {
146 nv_error(priv, "unhandled intr 0x%08x\n", stat);
147 nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
148 }
149
150 nouveau_engctx_put(engctx);
151}
152
153static int
154nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
155 struct nouveau_oclass *oclass, void *data, u32 size,
156 struct nouveau_object **pobject)
157{
158 struct nvc0_copy_priv *priv;
159 int ret;
160
161 if (nv_rd32(parent, 0x022500) & 0x00000100)
162 return -ENODEV;
163
164 ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
165 *pobject = nv_object(priv);
166 if (ret)
167 return ret;
168
169 nv_subdev(priv)->unit = 0x00000040;
170 nv_subdev(priv)->intr = nvc0_copy_intr;
171 nv_engine(priv)->cclass = &nvc0_copy0_cclass;
172 nv_engine(priv)->sclass = nvc0_copy0_sclass;
173 return 0;
174}
175
176static int
177nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
178 struct nouveau_oclass *oclass, void *data, u32 size,
179 struct nouveau_object **pobject)
180{
181 struct nvc0_copy_priv *priv;
182 int ret;
183
184 if (nv_rd32(parent, 0x022500) & 0x00000200)
185 return -ENODEV;
186
187 ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
188 *pobject = nv_object(priv);
189 if (ret)
190 return ret;
191
192 nv_subdev(priv)->unit = 0x00000080;
193 nv_subdev(priv)->intr = nvc0_copy_intr;
194 nv_engine(priv)->cclass = &nvc0_copy1_cclass;
195 nv_engine(priv)->sclass = nvc0_copy1_sclass;
196 return 0;
197}
198
199static int
200nvc0_copy_init(struct nouveau_object *object)
201{
202 int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
203 struct nvc0_copy_priv *priv = (void *)object;
204 int ret, i;
205
206 ret = nouveau_copy_init(&priv->base);
207 if (ret)
208 return ret;
209
210 /* disable all interrupts */
211 nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
212
213 /* upload ucode */
214 nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
215 for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
216 nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
217
218 nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
219 for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
220 if ((i & 0x3f) == 0)
221 nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
222 nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
223 }
224
225 /* start it running */
226 nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
227 nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
228 nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
229 nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
230 return 0;
231}
232
233static int
234nvc0_copy_fini(struct nouveau_object *object, bool suspend)
235{
236 int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
237 struct nvc0_copy_priv *priv = (void *)object;
238
239 nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
240 nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
241
242 return nouveau_copy_fini(&priv->base, suspend);
243}
244
245struct nouveau_oclass
246nvc0_copy0_oclass = {
247 .handle = NV_ENGINE(COPY0, 0xc0),
248 .ofuncs = &(struct nouveau_ofuncs) {
249 .ctor = nvc0_copy0_ctor,
250 .dtor = _nouveau_copy_dtor,
251 .init = nvc0_copy_init,
252 .fini = nvc0_copy_fini,
253 },
254};
255
256struct nouveau_oclass
257nvc0_copy1_oclass = {
258 .handle = NV_ENGINE(COPY1, 0xc0),
259 .ofuncs = &(struct nouveau_ofuncs) {
260 .ctor = nvc0_copy1_ctor,
261 .dtor = _nouveau_copy_dtor,
262 .init = nvc0_copy_init,
263 .fini = nvc0_copy_fini,
264 },
265};
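
Both Fermi copy engines share one register layout; everything in nvc0_copy_intr() and nvc0_copy_init() reaches engine 1 by adding a fixed 0x1000 stride to the PCOPY0 offsets. A small sketch of that addressing convention, using the status (0x...008) and interrupt-disable (0x...014) offsets seen above:

#include <stdio.h>

#define PCOPY_BASE(idx)   (0x104000 + (idx) * 0x1000)

int
main(void)
{
	int idx;
	for (idx = 0; idx < 2; idx++)
		printf("PCOPY%d: intr status 0x%06x, intr disable 0x%06x\n",
		       idx, PCOPY_BASE(idx) + 0x008, PCOPY_BASE(idx) + 0x014);
	return 0;
}
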
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
new file mode 100644
index 000000000000..2017c1579ac5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/enum.h>
27#include <core/class.h>
28#include <core/engctx.h>
29
30#include <engine/copy.h>
31
32struct nve0_copy_priv {
33 struct nouveau_copy base;
34};
35
36struct nve0_copy_chan {
37 struct nouveau_copy_chan base;
38};
39
40/*******************************************************************************
41 * Copy object classes
42 ******************************************************************************/
43
44static struct nouveau_oclass
45nve0_copy_sclass[] = {
46 { 0xa0b5, &nouveau_object_ofuncs },
47 {},
48};
49
50/*******************************************************************************
51 * PCOPY context
52 ******************************************************************************/
53
54static int
55nve0_copy_context_ctor(struct nouveau_object *parent,
56 struct nouveau_object *engine,
57 struct nouveau_oclass *oclass, void *data, u32 size,
58 struct nouveau_object **pobject)
59{
60 struct nve0_copy_chan *priv;
61 int ret;
62
63 ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
64 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
65 *pobject = nv_object(priv);
66 if (ret)
67 return ret;
68
69 return 0;
70}
71
72static struct nouveau_ofuncs
73nve0_copy_context_ofuncs = {
74 .ctor = nve0_copy_context_ctor,
75 .dtor = _nouveau_copy_context_dtor,
76 .init = _nouveau_copy_context_init,
77 .fini = _nouveau_copy_context_fini,
78 .rd32 = _nouveau_copy_context_rd32,
79 .wr32 = _nouveau_copy_context_wr32,
80};
81
82static struct nouveau_oclass
83nve0_copy_cclass = {
 84	.handle = NV_ENGCTX(COPY0, 0xe0),
85 .ofuncs = &nve0_copy_context_ofuncs,
86};
87
88/*******************************************************************************
89 * PCOPY engine/subdev functions
90 ******************************************************************************/
91
92static int
93nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
94 struct nouveau_oclass *oclass, void *data, u32 size,
95 struct nouveau_object **pobject)
96{
97 struct nve0_copy_priv *priv;
98 int ret;
99
100 if (nv_rd32(parent, 0x022500) & 0x00000100)
101 return -ENODEV;
102
103 ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
104 *pobject = nv_object(priv);
105 if (ret)
106 return ret;
107
108 nv_subdev(priv)->unit = 0x00000040;
109 nv_engine(priv)->cclass = &nve0_copy_cclass;
110 nv_engine(priv)->sclass = nve0_copy_sclass;
111 return 0;
112}
113
114static int
115nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
116 struct nouveau_oclass *oclass, void *data, u32 size,
117 struct nouveau_object **pobject)
118{
119 struct nve0_copy_priv *priv;
120 int ret;
121
122 if (nv_rd32(parent, 0x022500) & 0x00000200)
123 return -ENODEV;
124
125 ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
126 *pobject = nv_object(priv);
127 if (ret)
128 return ret;
129
130 nv_subdev(priv)->unit = 0x00000080;
131 nv_engine(priv)->cclass = &nve0_copy_cclass;
132 nv_engine(priv)->sclass = nve0_copy_sclass;
133 return 0;
134}
135
136struct nouveau_oclass
137nve0_copy0_oclass = {
138 .handle = NV_ENGINE(COPY0, 0xe0),
139 .ofuncs = &(struct nouveau_ofuncs) {
140 .ctor = nve0_copy0_ctor,
141 .dtor = _nouveau_copy_dtor,
142 .init = _nouveau_copy_init,
143 .fini = _nouveau_copy_fini,
144 },
145};
146
147struct nouveau_oclass
148nve0_copy1_oclass = {
149 .handle = NV_ENGINE(COPY1, 0xe0),
150 .ofuncs = &(struct nouveau_ofuncs) {
151 .ctor = nve0_copy1_ctor,
152 .dtor = _nouveau_copy_dtor,
153 .init = _nouveau_copy_init,
154 .fini = _nouveau_copy_fini,
155 },
156};
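
The nvc0 and nve0 constructors probe register 0x022500 before creating an engine: bit 8 set means PCOPY0 is fused off, bit 9 PCOPY1, and the ctor bails with -ENODEV. A sketch decoding that capability word (the sample value is made up):

#include <stdio.h>

int
main(void)
{
	unsigned r022500 = 0x00000200;  /* hypothetical sample readout */
	printf("PCOPY0 %s\n", (r022500 & 0x00000100) ? "fused off" : "present");
	printf("PCOPY1 %s\n", (r022500 & 0x00000200) ? "fused off" : "present");
	return 0;
}
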
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc
index 7393813044de..629da02dc352 100644
--- a/drivers/gpu/drm/nouveau/nv98_crypt.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc
@@ -238,7 +238,7 @@ ih:
 	cmpu b32 $r4 0x60+#dma_count
 	bra nc #illegal_mthd
 	shl b32 $r5 $r4 2
-	add b32 $r5 (#ctx_dma - 0x60 * 4) & 0xffff
+	add b32 $r5 ((#ctx_dma - 0x60 * 4) & 0xffff)
 	bset $r3 0x1e
 	st b32 D[$r5] $r3
 	add b32 $r4 0x180 - 0x60
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h
index 38676c74e6e0..09962e4210e9 100644
--- a/drivers/gpu/drm/nouveau/nv98_crypt.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/fuc/nv98.fuc.h
@@ -1,4 +1,4 @@
-uint32_t nv98_pcrypt_data[] = {
+static uint32_t nv98_pcrypt_data[] = {
 /* 0x0000: ctx_dma */
 /* 0x0000: ctx_dma_query */
 	0x00000000,
@@ -150,7 +150,7 @@ uint32_t nv98_pcrypt_data[] = {
 	0x00000000,
 };
 
-uint32_t nv98_pcrypt_code[] = {
+static uint32_t nv98_pcrypt_code[] = {
 	0x17f004bd,
 	0x0010fe35,
 	0xf10004fe,
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
new file mode 100644
index 000000000000..1d85e5b66ca0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -0,0 +1,217 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/enum.h>
27#include <core/class.h>
28#include <core/engctx.h>
29#include <core/gpuobj.h>
30
31#include <subdev/fb.h>
32
33#include <engine/fifo.h>
34#include <engine/crypt.h>
35
36struct nv84_crypt_priv {
37 struct nouveau_crypt base;
38};
39
40struct nv84_crypt_chan {
41 struct nouveau_crypt_chan base;
42};
43
44/*******************************************************************************
45 * Crypt object classes
46 ******************************************************************************/
47
48static int
49nv84_crypt_object_ctor(struct nouveau_object *parent,
50 struct nouveau_object *engine,
51 struct nouveau_oclass *oclass, void *data, u32 size,
52 struct nouveau_object **pobject)
53{
54 struct nouveau_gpuobj *obj;
55 int ret;
56
57 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
58 16, 16, 0, &obj);
59 *pobject = nv_object(obj);
60 if (ret)
61 return ret;
62
63 nv_wo32(obj, 0x00, nv_mclass(obj));
64 nv_wo32(obj, 0x04, 0x00000000);
65 nv_wo32(obj, 0x08, 0x00000000);
66 nv_wo32(obj, 0x0c, 0x00000000);
67 return 0;
68}
69
70static struct nouveau_ofuncs
71nv84_crypt_ofuncs = {
72 .ctor = nv84_crypt_object_ctor,
73 .dtor = _nouveau_gpuobj_dtor,
74 .init = _nouveau_gpuobj_init,
75 .fini = _nouveau_gpuobj_fini,
76 .rd32 = _nouveau_gpuobj_rd32,
77 .wr32 = _nouveau_gpuobj_wr32,
78};
79
80static struct nouveau_oclass
81nv84_crypt_sclass[] = {
82 { 0x74c1, &nv84_crypt_ofuncs },
83 {}
84};
85
86/*******************************************************************************
87 * PCRYPT context
88 ******************************************************************************/
89
90static int
91nv84_crypt_context_ctor(struct nouveau_object *parent,
92 struct nouveau_object *engine,
93 struct nouveau_oclass *oclass, void *data, u32 size,
94 struct nouveau_object **pobject)
95{
96 struct nv84_crypt_chan *priv;
97 int ret;
98
99 ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
100 0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
101 *pobject = nv_object(priv);
102 if (ret)
103 return ret;
104
105 return 0;
106}
107
108static struct nouveau_oclass
109nv84_crypt_cclass = {
110 .handle = NV_ENGCTX(CRYPT, 0x84),
111 .ofuncs = &(struct nouveau_ofuncs) {
112 .ctor = nv84_crypt_context_ctor,
113 .dtor = _nouveau_crypt_context_dtor,
114 .init = _nouveau_crypt_context_init,
115 .fini = _nouveau_crypt_context_fini,
116 .rd32 = _nouveau_crypt_context_rd32,
117 .wr32 = _nouveau_crypt_context_wr32,
118 },
119};
120
121/*******************************************************************************
122 * PCRYPT engine/subdev functions
123 ******************************************************************************/
124
125static const struct nouveau_bitfield nv84_crypt_intr_mask[] = {
126 { 0x00000001, "INVALID_STATE" },
127 { 0x00000002, "ILLEGAL_MTHD" },
128 { 0x00000004, "ILLEGAL_CLASS" },
129 { 0x00000080, "QUERY" },
130 { 0x00000100, "FAULT" },
131 {}
132};
133
134static void
135nv84_crypt_intr(struct nouveau_subdev *subdev)
136{
137 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
138 struct nouveau_engine *engine = nv_engine(subdev);
139 struct nouveau_object *engctx;
140 struct nv84_crypt_priv *priv = (void *)subdev;
141 u32 stat = nv_rd32(priv, 0x102130);
142 u32 mthd = nv_rd32(priv, 0x102190);
143 u32 data = nv_rd32(priv, 0x102194);
144 u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
145 int chid;
146
147 engctx = nouveau_engctx_get(engine, inst);
148 chid = pfifo->chid(pfifo, engctx);
149
150 if (stat) {
151 nv_error(priv, "");
152 nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
153 printk(" ch %d [0x%010llx] mthd 0x%04x data 0x%08x\n",
154 chid, (u64)inst << 12, mthd, data);
155 }
156
157 nv_wr32(priv, 0x102130, stat);
158 nv_wr32(priv, 0x10200c, 0x10);
159
160 nv50_fb_trap(nouveau_fb(priv), 1);
161 nouveau_engctx_put(engctx);
162}
163
164static int
165nv84_crypt_tlb_flush(struct nouveau_engine *engine)
166{
167 nv50_vm_flush_engine(&engine->base, 0x0a);
168 return 0;
169}
170
171static int
172nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
173 struct nouveau_oclass *oclass, void *data, u32 size,
174 struct nouveau_object **pobject)
175{
176 struct nv84_crypt_priv *priv;
177 int ret;
178
179 ret = nouveau_crypt_create(parent, engine, oclass, &priv);
180 *pobject = nv_object(priv);
181 if (ret)
182 return ret;
183
184 nv_subdev(priv)->unit = 0x00004000;
185 nv_subdev(priv)->intr = nv84_crypt_intr;
186 nv_engine(priv)->cclass = &nv84_crypt_cclass;
187 nv_engine(priv)->sclass = nv84_crypt_sclass;
188 nv_engine(priv)->tlb_flush = nv84_crypt_tlb_flush;
189 return 0;
190}
191
192static int
193nv84_crypt_init(struct nouveau_object *object)
194{
195 struct nv84_crypt_priv *priv = (void *)object;
196 int ret;
197
198 ret = nouveau_crypt_init(&priv->base);
199 if (ret)
200 return ret;
201
202 nv_wr32(priv, 0x102130, 0xffffffff);
203 nv_wr32(priv, 0x102140, 0xffffffbf);
204 nv_wr32(priv, 0x10200c, 0x00000010);
205 return 0;
206}
207
208struct nouveau_oclass
209nv84_crypt_oclass = {
210 .handle = NV_ENGINE(CRYPT, 0x84),
211 .ofuncs = &(struct nouveau_ofuncs) {
212 .ctor = nv84_crypt_ctor,
213 .dtor = _nouveau_crypt_dtor,
214 .init = nv84_crypt_init,
215 .fini = _nouveau_crypt_fini,
216 },
217};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
new file mode 100644
index 000000000000..9e3876c89b96
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -0,0 +1,208 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/enum.h>
27#include <core/class.h>
28#include <core/engctx.h>
29
30#include <subdev/timer.h>
31#include <subdev/fb.h>
32
33#include <engine/fifo.h>
34#include <engine/crypt.h>
35
36#include "fuc/nv98.fuc.h"
37
38struct nv98_crypt_priv {
39 struct nouveau_crypt base;
40};
41
42struct nv98_crypt_chan {
43 struct nouveau_crypt_chan base;
44};
45
46/*******************************************************************************
47 * Crypt object classes
48 ******************************************************************************/
49
50static struct nouveau_oclass
51nv98_crypt_sclass[] = {
52 { 0x88b4, &nouveau_object_ofuncs },
53 {},
54};
55
56/*******************************************************************************
57 * PCRYPT context
58 ******************************************************************************/
59
60static int
61nv98_crypt_context_ctor(struct nouveau_object *parent,
62 struct nouveau_object *engine,
63 struct nouveau_oclass *oclass, void *data, u32 size,
64 struct nouveau_object **pobject)
65{
66 struct nv98_crypt_chan *priv;
67 int ret;
68
69 ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
70 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
71 *pobject = nv_object(priv);
72 if (ret)
73 return ret;
74
75 return 0;
76}
77
78static struct nouveau_oclass
79nv98_crypt_cclass = {
80 .handle = NV_ENGCTX(CRYPT, 0x98),
81 .ofuncs = &(struct nouveau_ofuncs) {
82 .ctor = nv98_crypt_context_ctor,
83 .dtor = _nouveau_crypt_context_dtor,
84 .init = _nouveau_crypt_context_init,
85 .fini = _nouveau_crypt_context_fini,
86 .rd32 = _nouveau_crypt_context_rd32,
87 .wr32 = _nouveau_crypt_context_wr32,
88 },
89};
90
91/*******************************************************************************
92 * PCRYPT engine/subdev functions
93 ******************************************************************************/
94
95static const struct nouveau_enum nv98_crypt_isr_error_name[] = {
96 { 0x0000, "ILLEGAL_MTHD" },
97 { 0x0001, "INVALID_BITFIELD" },
98 { 0x0002, "INVALID_ENUM" },
99 { 0x0003, "QUERY" },
100 {}
101};
102
103static void
104nv98_crypt_intr(struct nouveau_subdev *subdev)
105{
106 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
107 struct nouveau_engine *engine = nv_engine(subdev);
108 struct nouveau_object *engctx;
109 struct nv98_crypt_priv *priv = (void *)subdev;
110 u32 disp = nv_rd32(priv, 0x08701c);
111 u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
112 u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
113 u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff;
114 u32 addr = nv_rd32(priv, 0x087040) >> 16;
115 u32 mthd = (addr & 0x07ff) << 2;
116 u32 subc = (addr & 0x3800) >> 11;
117 u32 data = nv_rd32(priv, 0x087044);
118 int chid;
119
120 engctx = nouveau_engctx_get(engine, inst);
121 chid = pfifo->chid(pfifo, engctx);
122
123 if (stat & 0x00000040) {
124 nv_error(priv, "DISPATCH_ERROR [");
125 nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
126 printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
127 chid, (u64)inst << 12, subc, mthd, data);
128 nv_wr32(priv, 0x087004, 0x00000040);
129 stat &= ~0x00000040;
130 }
131
132 if (stat) {
133 nv_error(priv, "unhandled intr 0x%08x\n", stat);
134 nv_wr32(priv, 0x087004, stat);
135 }
136
137 nv50_fb_trap(nouveau_fb(priv), 1);
138 nouveau_engctx_put(engctx);
139}
140
141static int
142nv98_crypt_tlb_flush(struct nouveau_engine *engine)
143{
144 nv50_vm_flush_engine(&engine->base, 0x0a);
145 return 0;
146}
147
148static int
149nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
150 struct nouveau_oclass *oclass, void *data, u32 size,
151 struct nouveau_object **pobject)
152{
153 struct nv98_crypt_priv *priv;
154 int ret;
155
156 ret = nouveau_crypt_create(parent, engine, oclass, &priv);
157 *pobject = nv_object(priv);
158 if (ret)
159 return ret;
160
161 nv_subdev(priv)->unit = 0x00004000;
162 nv_subdev(priv)->intr = nv98_crypt_intr;
163 nv_engine(priv)->cclass = &nv98_crypt_cclass;
164 nv_engine(priv)->sclass = nv98_crypt_sclass;
165 nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
166 return 0;
167}
168
169static int
170nv98_crypt_init(struct nouveau_object *object)
171{
172 struct nv98_crypt_priv *priv = (void *)object;
173 int ret, i;
174
175 ret = nouveau_crypt_init(&priv->base);
176 if (ret)
177 return ret;
178
179 /* wait for exit interrupt to signal */
180 nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
181 nv_wr32(priv, 0x087004, 0x00000010);
182
183 /* upload microcode code and data segments */
184 nv_wr32(priv, 0x087ff8, 0x00100000);
185 for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
186 nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
187
188 nv_wr32(priv, 0x087ff8, 0x00000000);
189 for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
190 nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
191
192 /* start it running */
193 nv_wr32(priv, 0x08710c, 0x00000000);
194 nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
195 nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
196 return 0;
197}
198
199struct nouveau_oclass
200nv98_crypt_oclass = {
201 .handle = NV_ENGINE(CRYPT, 0x98),
202 .ofuncs = &(struct nouveau_ofuncs) {
203 .ctor = nv98_crypt_ctor,
204 .dtor = _nouveau_crypt_dtor,
205 .init = nv98_crypt_init,
206 .fini = _nouveau_crypt_fini,
207 },
208};
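
nv98_crypt_init() opens with nv_wait(), a bounded poll on bit 4 of 0x087008, before acking that bit and re-uploading the microcode. A self-contained model of the poll-then-ack step, with read_stat() as a hypothetical register-read stub:

#include <stdio.h>

static unsigned
read_stat(void)
{
	static int calls;
	return (++calls > 3) ? 0x00000010 : 0;  /* bit appears on 4th poll */
}

int
main(void)
{
	int timeout = 1000;
	while (timeout-- && !(read_stat() & 0x00000010))
		;  /* spin until the exit interrupt asserts */
	printf(timeout >= 0 ? "exit intr seen, acking\n" : "timed out\n");
	return 0;
}
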
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
new file mode 100644
index 000000000000..1c919f2af89f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -0,0 +1,90 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/disp.h>
26
27struct nv04_disp_priv {
28 struct nouveau_disp base;
29};
30
31static struct nouveau_oclass
32nv04_disp_sclass[] = {
33 {},
34};
35
36static void
37nv04_disp_intr_vblank(struct nv04_disp_priv *priv, int crtc)
38{
39 struct nouveau_disp *disp = &priv->base;
40 if (disp->vblank.notify)
41 disp->vblank.notify(disp->vblank.data, crtc);
42}
43
44static void
45nv04_disp_intr(struct nouveau_subdev *subdev)
46{
47 struct nv04_disp_priv *priv = (void *)subdev;
48 u32 crtc0 = nv_rd32(priv, 0x600100);
49 u32 crtc1 = nv_rd32(priv, 0x602100);
50
51 if (crtc0 & 0x00000001) {
52 nv04_disp_intr_vblank(priv, 0);
53 nv_wr32(priv, 0x600100, 0x00000001);
54 }
55
56 if (crtc1 & 0x00000001) {
57 nv04_disp_intr_vblank(priv, 1);
58 nv_wr32(priv, 0x602100, 0x00000001);
59 }
60}
61
62static int
63nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
64 struct nouveau_oclass *oclass, void *data, u32 size,
65 struct nouveau_object **pobject)
66{
67 struct nv04_disp_priv *priv;
68 int ret;
69
70 ret = nouveau_disp_create(parent, engine, oclass, "DISPLAY",
71 "display", &priv);
72 *pobject = nv_object(priv);
73 if (ret)
74 return ret;
75
76 nv_engine(priv)->sclass = nv04_disp_sclass;
77 nv_subdev(priv)->intr = nv04_disp_intr;
78 return 0;
79}
80
81struct nouveau_oclass
82nv04_disp_oclass = {
83 .handle = NV_ENGINE(DISP, 0x04),
84 .ofuncs = &(struct nouveau_ofuncs) {
85 .ctor = nv04_disp_ctor,
86 .dtor = _nouveau_disp_dtor,
87 .init = _nouveau_disp_init,
88 .fini = _nouveau_disp_fini,
89 },
90};
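
nv04_disp_intr() reads each CRTC's pending word, dispatches the vblank bit, then acknowledges it by writing the handled bit back, write-one-to-clear style. A toy model of that dispatch loop, with the registers reduced to plain variables:

#include <stdio.h>

static unsigned crtc_stat[2] = { 0x00000001, 0x00000000 };

static void
disp_intr(void)
{
	int i;
	for (i = 0; i < 2; i++) {
		if (crtc_stat[i] & 0x00000001) {
			printf("vblank on crtc %d\n", i);
			crtc_stat[i] &= ~0x00000001;  /* ack (w1c modelled) */
		}
	}
}

int
main(void)
{
	disp_intr();
	return 0;
}
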
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
new file mode 100644
index 000000000000..16a9afb1060b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -0,0 +1,125 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/software.h>
26#include <engine/disp.h>
27
28struct nv50_disp_priv {
29 struct nouveau_disp base;
30};
31
32static struct nouveau_oclass
33nv50_disp_sclass[] = {
34 {},
35};
36
37static void
38nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
39{
40 struct nouveau_disp *disp = &priv->base;
41 struct nouveau_software_chan *chan, *temp;
42 unsigned long flags;
43
44 spin_lock_irqsave(&disp->vblank.lock, flags);
45 list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
46 if (chan->vblank.crtc != crtc)
47 continue;
48
49 nv_wr32(priv, 0x001704, chan->vblank.channel);
50 nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
51
52 if (nv_device(priv)->chipset == 0x50) {
53 nv_wr32(priv, 0x001570, chan->vblank.offset);
54 nv_wr32(priv, 0x001574, chan->vblank.value);
55 } else {
56 if (nv_device(priv)->chipset >= 0xc0) {
57 nv_wr32(priv, 0x06000c,
58 upper_32_bits(chan->vblank.offset));
59 }
60 nv_wr32(priv, 0x060010, chan->vblank.offset);
61 nv_wr32(priv, 0x060014, chan->vblank.value);
62 }
63
64 list_del(&chan->vblank.head);
65 if (disp->vblank.put)
66 disp->vblank.put(disp->vblank.data, crtc);
67 }
68 spin_unlock_irqrestore(&disp->vblank.lock, flags);
69
70 if (disp->vblank.notify)
71 disp->vblank.notify(disp->vblank.data, crtc);
72}
73
74static void
75nv50_disp_intr(struct nouveau_subdev *subdev)
76{
77 struct nv50_disp_priv *priv = (void *)subdev;
78 u32 stat1 = nv_rd32(priv, 0x610024);
79
80 if (stat1 & 0x00000004) {
81 nv50_disp_intr_vblank(priv, 0);
82 nv_wr32(priv, 0x610024, 0x00000004);
83 stat1 &= ~0x00000004;
84 }
85
86 if (stat1 & 0x00000008) {
87 nv50_disp_intr_vblank(priv, 1);
88 nv_wr32(priv, 0x610024, 0x00000008);
89 stat1 &= ~0x00000008;
90 }
91
92}
93
94static int
95nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
96 struct nouveau_oclass *oclass, void *data, u32 size,
97 struct nouveau_object **pobject)
98{
99 struct nv50_disp_priv *priv;
100 int ret;
101
102 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
103 "display", &priv);
104 *pobject = nv_object(priv);
105 if (ret)
106 return ret;
107
108 nv_engine(priv)->sclass = nv50_disp_sclass;
109 nv_subdev(priv)->intr = nv50_disp_intr;
110
111 INIT_LIST_HEAD(&priv->base.vblank.list);
112 spin_lock_init(&priv->base.vblank.lock);
113 return 0;
114}
115
116struct nouveau_oclass
117nv50_disp_oclass = {
118 .handle = NV_ENGINE(DISP, 0x50),
119 .ofuncs = &(struct nouveau_ofuncs) {
120 .ctor = nv50_disp_ctor,
121 .dtor = _nouveau_disp_dtor,
122 .init = _nouveau_disp_init,
123 .fini = _nouveau_disp_fini,
124 },
125};
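
On chipsets >= 0xc0 the vblank path above releases the software channel's semaphore by programming the 64-bit buffer offset as separate high and low words (0x06000c, then 0x060010). A sketch of that split, with a made-up offset:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t offset = 0x00000002fe000010ULL;  /* hypothetical example */
	printf("0x06000c <- 0x%08x\n", (uint32_t)(offset >> 32));
	printf("0x060010 <- 0x%08x\n", (uint32_t)(offset & 0xffffffff));
	return 0;
}
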
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
new file mode 100644
index 000000000000..d93efbcf75b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -0,0 +1,118 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bar.h>
26
27#include <engine/software.h>
28#include <engine/disp.h>
29
30struct nvd0_disp_priv {
31 struct nouveau_disp base;
32};
33
34static struct nouveau_oclass
35nvd0_disp_sclass[] = {
36 {},
37};
38
39static void
40nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
41{
42 struct nouveau_bar *bar = nouveau_bar(priv);
43 struct nouveau_disp *disp = &priv->base;
44 struct nouveau_software_chan *chan, *temp;
45 unsigned long flags;
46
47 spin_lock_irqsave(&disp->vblank.lock, flags);
48 list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
49 if (chan->vblank.crtc != crtc)
50 continue;
51
52 nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
53 bar->flush(bar);
54 nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
55 nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
56 nv_wr32(priv, 0x060014, chan->vblank.value);
57
58 list_del(&chan->vblank.head);
59 if (disp->vblank.put)
60 disp->vblank.put(disp->vblank.data, crtc);
61 }
62 spin_unlock_irqrestore(&disp->vblank.lock, flags);
63
64 if (disp->vblank.notify)
65 disp->vblank.notify(disp->vblank.data, crtc);
66}
67
68static void
69nvd0_disp_intr(struct nouveau_subdev *subdev)
70{
71 struct nvd0_disp_priv *priv = (void *)subdev;
72 u32 intr = nv_rd32(priv, 0x610088);
73 int i;
74
75 for (i = 0; i < 4; i++) {
76 u32 mask = 0x01000000 << i;
77 if (mask & intr) {
78 u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
79 if (stat & 0x00000001)
80 nvd0_disp_intr_vblank(priv, i);
81 nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
82 nv_rd32(priv, 0x6100c0 + (i * 0x800));
83 }
84 }
85}
86
87static int
88nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
89 struct nouveau_oclass *oclass, void *data, u32 size,
90 struct nouveau_object **pobject)
91{
92 struct nvd0_disp_priv *priv;
93 int ret;
94
95 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
96 "display", &priv);
97 *pobject = nv_object(priv);
98 if (ret)
99 return ret;
100
101 nv_engine(priv)->sclass = nvd0_disp_sclass;
102 nv_subdev(priv)->intr = nvd0_disp_intr;
103
104 INIT_LIST_HEAD(&priv->base.vblank.list);
105 spin_lock_init(&priv->base.vblank.lock);
106 return 0;
107}
108
109struct nouveau_oclass
110nvd0_disp_oclass = {
111 .handle = NV_ENGINE(DISP, 0xd0),
112 .ofuncs = &(struct nouveau_ofuncs) {
113 .ctor = nvd0_disp_ctor,
114 .dtor = _nouveau_disp_dtor,
115 .init = _nouveau_disp_init,
116 .fini = _nouveau_disp_fini,
117 },
118};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/vga.c b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
new file mode 100644
index 000000000000..5a1c68474597
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/vga.c
@@ -0,0 +1,215 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/subdev.h>
26#include <core/device.h>
27#include <subdev/vga.h>
28
29u8
30nv_rdport(void *obj, int head, u16 port)
31{
32 struct nouveau_device *device = nv_device(obj);
33
34 if (device->card_type >= NV_50)
35 return nv_rd08(obj, 0x601000 + port);
36
37 if (port == 0x03c0 || port == 0x03c1 || /* AR */
38 port == 0x03c2 || port == 0x03da || /* INP0 */
39 port == 0x03d4 || port == 0x03d5) /* CR */
40 return nv_rd08(obj, 0x601000 + (head * 0x2000) + port);
41
42 if (port == 0x03c2 || port == 0x03cc || /* MISC */
43 port == 0x03c4 || port == 0x03c5 || /* SR */
44 port == 0x03ce || port == 0x03cf) { /* GR */
45 if (device->card_type < NV_40)
46 head = 0; /* CR44 selects head */
47 return nv_rd08(obj, 0x0c0000 + (head * 0x2000) + port);
48 }
49
50 nv_error(obj, "unknown vga port 0x%04x\n", port);
51 return 0x00;
52}
53
54void
55nv_wrport(void *obj, int head, u16 port, u8 data)
56{
57 struct nouveau_device *device = nv_device(obj);
58
59 if (device->card_type >= NV_50)
60 nv_wr08(obj, 0x601000 + port, data);
61 else
62 if (port == 0x03c0 || port == 0x03c1 || /* AR */
63 port == 0x03c2 || port == 0x03da || /* INP0 */
64 port == 0x03d4 || port == 0x03d5) /* CR */
65 nv_wr08(obj, 0x601000 + (head * 0x2000) + port, data);
66 else
67 if (port == 0x03c2 || port == 0x03cc || /* MISC */
68 port == 0x03c4 || port == 0x03c5 || /* SR */
69 port == 0x03ce || port == 0x03cf) { /* GR */
70 if (device->card_type < NV_40)
71 head = 0; /* CR44 selects head */
72 nv_wr08(obj, 0x0c0000 + (head * 0x2000) + port, data);
73 } else
74 nv_error(obj, "unknown vga port 0x%04x\n", port);
75}
76
77u8
78nv_rdvgas(void *obj, int head, u8 index)
79{
80 nv_wrport(obj, head, 0x03c4, index);
81 return nv_rdport(obj, head, 0x03c5);
82}
83
84void
85nv_wrvgas(void *obj, int head, u8 index, u8 value)
86{
87 nv_wrport(obj, head, 0x03c4, index);
88 nv_wrport(obj, head, 0x03c5, value);
89}
90
91u8
92nv_rdvgag(void *obj, int head, u8 index)
93{
94 nv_wrport(obj, head, 0x03ce, index);
95 return nv_rdport(obj, head, 0x03cf);
96}
97
98void
99nv_wrvgag(void *obj, int head, u8 index, u8 value)
100{
101 nv_wrport(obj, head, 0x03ce, index);
102 nv_wrport(obj, head, 0x03cf, value);
103}
104
105u8
106nv_rdvgac(void *obj, int head, u8 index)
107{
108 nv_wrport(obj, head, 0x03d4, index);
109 return nv_rdport(obj, head, 0x03d5);
110}
111
112void
113nv_wrvgac(void *obj, int head, u8 index, u8 value)
114{
115 nv_wrport(obj, head, 0x03d4, index);
116 nv_wrport(obj, head, 0x03d5, value);
117}
118
119u8
120nv_rdvgai(void *obj, int head, u16 port, u8 index)
121{
122 if (port == 0x03c4) return nv_rdvgas(obj, head, index);
123 if (port == 0x03ce) return nv_rdvgag(obj, head, index);
124 if (port == 0x03d4) return nv_rdvgac(obj, head, index);
125 nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
126 return 0x00;
127}
128
129void
130nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value)
131{
132 if (port == 0x03c4) nv_wrvgas(obj, head, index, value);
133 else if (port == 0x03ce) nv_wrvgag(obj, head, index, value);
134 else if (port == 0x03d4) nv_wrvgac(obj, head, index, value);
135 else nv_error(obj, "unknown indexed vga port 0x%04x\n", port);
136}
137
138bool
139nv_lockvgac(void *obj, bool lock)
140{
141 bool locked = !nv_rdvgac(obj, 0, 0x1f);
142 u8 data = lock ? 0x99 : 0x57;
143 nv_wrvgac(obj, 0, 0x1f, data);
144 if (nv_device(obj)->chipset == 0x11) {
145 if (!(nv_rd32(obj, 0x001084) & 0x10000000))
146 nv_wrvgac(obj, 1, 0x1f, data);
147 }
148 return locked;
149}
150
151/* CR44 takes the values 0 (head A), 3 (head B) and 4 (heads tied).
152 * It affects only the 8-bit vga io regs, which we access using mmio at
153 * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*.
154 * In general the set value of cr44 does not matter: reg access works as
155 * expected and values can be set for the appropriate head by using a 0x2000
156 * offset as required.
157 * However:
158 * a) pre-nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed,
159 *    and cr44 must be set to 0 or 3 for accessing values on the correct head
160 *    through the common 0xc03c* addresses.
161 * b) in tied mode (4), head B is programmed with the values set on head A,
162 *    and access using the head B addresses can have strange results, so we
163 *    leave tied mode in init once we know what cr44 should be restored to on exit.
164 *
165 * The owner parameter is slightly abused:
166 * 0 and 1 are treated as head values, so the value written is (owner * 3);
167 * other values are written literally.
168 */
169u8
170nv_rdvgaowner(void *obj)
171{
172 if (nv_device(obj)->card_type < NV_50) {
173 if (nv_device(obj)->chipset == 0x11) {
174 u32 tied = nv_rd32(obj, 0x001084) & 0x10000000;
175 if (tied == 0) {
176 u8 slA = nv_rdvgac(obj, 0, 0x28) & 0x80;
177 u8 tvA = nv_rdvgac(obj, 0, 0x33) & 0x01;
178 u8 slB = nv_rdvgac(obj, 1, 0x28) & 0x80;
179 u8 tvB = nv_rdvgac(obj, 1, 0x33) & 0x01;
180 if (slA && !tvA) return 0x00;
181 if (slB && !tvB) return 0x03;
182 if (slA) return 0x00;
183 if (slB) return 0x03;
184 return 0x00;
185 }
186 return 0x04;
187 }
188
189 return nv_rdvgac(obj, 0, 0x44);
190 }
191
192 nv_error(obj, "rdvgaowner after nv4x\n");
193 return 0x00;
194}
195
196void
197nv_wrvgaowner(void *obj, u8 select)
198{
199 if (nv_device(obj)->card_type < NV_50) {
200 u8 owner = (select == 1) ? 3 : select;
201 if (nv_device(obj)->chipset == 0x11) {
202 /* workaround hw lockup bug */
203 nv_rdvgac(obj, 0, 0x1f);
204 nv_rdvgac(obj, 1, 0x1f);
205 }
206
207 nv_wrvgac(obj, 0, 0x44, owner);
208
209 if (nv_device(obj)->chipset == 0x11) {
210 nv_wrvgac(obj, 0, 0x2e, owner);
211 nv_wrvgac(obj, 0, 0x2e, owner);
212 }
213 } else
214 nv_error(obj, "wrvgaowner after nv4x\n");
215}
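
The owner handling described in the comment above reduces to a tiny mapping; a minimal standalone sketch of it (userspace, function name invented here, not driver code):

#include <stdio.h>

/* The mapping nv_wrvgaowner() applies to 'select': head indices 0 and 1
 * become CR44 values 0 and 3 (i.e. owner * 3); anything else, such as 4
 * for tied heads, is written through literally. */
static unsigned char cr44_value(unsigned char select)
{
	return (select == 1) ? 3 : select;
}

int main(void)
{
	printf("head A -> 0x%02x\n", cr44_value(0)); /* 0x00 */
	printf("head B -> 0x%02x\n", cr44_value(1)); /* 0x03 */
	printf("tied   -> 0x%02x\n", cr44_value(4)); /* 0x04 */
	return 0;
}
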
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
new file mode 100644
index 000000000000..e1f013d39768
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -0,0 +1,87 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/class.h>
27
28#include <subdev/fb.h>
29#include <engine/dmaobj.h>
30
31int
32nouveau_dmaobj_create_(struct nouveau_object *parent,
33 struct nouveau_object *engine,
34 struct nouveau_oclass *oclass,
35 void *data, u32 size, int len, void **pobject)
36{
37 struct nv_dma_class *args = data;
38 struct nouveau_dmaobj *object;
39 int ret;
40
41 if (size < sizeof(*args))
42 return -EINVAL;
43
44 ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
45 object = *pobject;
46 if (ret)
47 return ret;
48
49 switch (args->flags & NV_DMA_TARGET_MASK) {
50 case NV_DMA_TARGET_VM:
51 object->target = NV_MEM_TARGET_VM;
52 break;
53 case NV_DMA_TARGET_VRAM:
54 object->target = NV_MEM_TARGET_VRAM;
55 break;
56 case NV_DMA_TARGET_PCI:
57 object->target = NV_MEM_TARGET_PCI;
58 break;
59 case NV_DMA_TARGET_PCI_US:
60 case NV_DMA_TARGET_AGP:
61 object->target = NV_MEM_TARGET_PCI_NOSNOOP;
62 break;
63 default:
64 return -EINVAL;
65 }
66
67 switch (args->flags & NV_DMA_ACCESS_MASK) {
68 case NV_DMA_ACCESS_VM:
69 object->access = NV_MEM_ACCESS_VM;
70 break;
71 case NV_DMA_ACCESS_RD:
72 object->access = NV_MEM_ACCESS_RO;
73 break;
74 case NV_DMA_ACCESS_WR:
75 object->access = NV_MEM_ACCESS_WO;
76 break;
77 case NV_DMA_ACCESS_RDWR:
78 object->access = NV_MEM_ACCESS_RW;
79 break;
80 default:
81 return -EINVAL;
82 }
83
84 object->start = args->start;
85 object->limit = args->limit;
86 return 0;
87}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
new file mode 100644
index 000000000000..9f4cc2f31994
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -0,0 +1,185 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26#include <core/class.h>
27
28#include <subdev/fb.h>
29#include <subdev/vm/nv04.h>
30
31#include <engine/dmaobj.h>
32
33struct nv04_dmaeng_priv {
34 struct nouveau_dmaeng base;
35};
36
37struct nv04_dmaobj_priv {
38 struct nouveau_dmaobj base;
39};
40
41static int
42nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
43 struct nouveau_object *parent,
44 struct nouveau_dmaobj *dmaobj,
45 struct nouveau_gpuobj **pgpuobj)
46{
47 struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaeng);
48 struct nouveau_gpuobj *gpuobj;
49 u32 flags0 = nv_mclass(dmaobj);
50 u32 flags2 = 0x00000000;
51 u64 offset = dmaobj->start & 0xfffff000;
52 u64 adjust = dmaobj->start & 0x00000fff;
53 u32 length = dmaobj->limit - dmaobj->start;
54 int ret;
55
56 if (dmaobj->target == NV_MEM_TARGET_VM) {
57 if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
58 struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
59 if (!dmaobj->start)
60 return nouveau_gpuobj_dup(parent, pgt, pgpuobj);
61 offset = nv_ro32(pgt, 8 + (offset >> 10));
62 offset &= 0xfffff000;
63 }
64
65 dmaobj->target = NV_MEM_TARGET_PCI;
66 dmaobj->access = NV_MEM_ACCESS_RW;
67 }
68
69 switch (dmaobj->target) {
70 case NV_MEM_TARGET_VRAM:
71 flags0 |= 0x00003000;
72 break;
73 case NV_MEM_TARGET_PCI:
74 flags0 |= 0x00023000;
75 break;
76 case NV_MEM_TARGET_PCI_NOSNOOP:
77 flags0 |= 0x00033000;
78 break;
79 default:
80 return -EINVAL;
81 }
82
83 switch (dmaobj->access) {
84 case NV_MEM_ACCESS_RO:
85 flags0 |= 0x00004000;
86 break;
87 case NV_MEM_ACCESS_WO:
88		flags0 |= 0x00008000; /* no break: WO also sets the flags2 bit */
89 case NV_MEM_ACCESS_RW:
90 flags2 |= 0x00000002;
91 break;
92 default:
93 return -EINVAL;
94 }
95
96 ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
97 *pgpuobj = gpuobj;
98 if (ret == 0) {
99 nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20));
100 nv_wo32(*pgpuobj, 0x04, length);
101 nv_wo32(*pgpuobj, 0x08, flags2 | offset);
102 nv_wo32(*pgpuobj, 0x0c, flags2 | offset);
103 }
104
105 return ret;
106}
107
108static int
109nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
110 struct nouveau_oclass *oclass, void *data, u32 size,
111 struct nouveau_object **pobject)
112{
113 struct nouveau_dmaeng *dmaeng = (void *)engine;
114 struct nv04_dmaobj_priv *dmaobj;
115 struct nouveau_gpuobj *gpuobj;
116 int ret;
117
118 ret = nouveau_dmaobj_create(parent, engine, oclass,
119 data, size, &dmaobj);
120 *pobject = nv_object(dmaobj);
121 if (ret)
122 return ret;
123
124 switch (nv_mclass(parent)) {
125 case NV_DEVICE_CLASS:
126 break;
127 case NV03_CHANNEL_DMA_CLASS:
128 case NV10_CHANNEL_DMA_CLASS:
129 case NV17_CHANNEL_DMA_CLASS:
130 case NV40_CHANNEL_DMA_CLASS:
131 ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
132 nouveau_object_ref(NULL, pobject);
133 *pobject = nv_object(gpuobj);
134 break;
135 default:
136 return -EINVAL;
137 }
138
139 return ret;
140}
141
142static struct nouveau_ofuncs
143nv04_dmaobj_ofuncs = {
144 .ctor = nv04_dmaobj_ctor,
145 .dtor = _nouveau_dmaobj_dtor,
146 .init = _nouveau_dmaobj_init,
147 .fini = _nouveau_dmaobj_fini,
148};
149
150static struct nouveau_oclass
151nv04_dmaobj_sclass[] = {
152 { 0x0002, &nv04_dmaobj_ofuncs },
153 { 0x0003, &nv04_dmaobj_ofuncs },
154 { 0x003d, &nv04_dmaobj_ofuncs },
155 {}
156};
157
158static int
159nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
160 struct nouveau_oclass *oclass, void *data, u32 size,
161 struct nouveau_object **pobject)
162{
163 struct nv04_dmaeng_priv *priv;
164 int ret;
165
166 ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
167 *pobject = nv_object(priv);
168 if (ret)
169 return ret;
170
171 priv->base.base.sclass = nv04_dmaobj_sclass;
172 priv->base.bind = nv04_dmaobj_bind;
173 return 0;
174}
175
176struct nouveau_oclass
177nv04_dmaeng_oclass = {
178 .handle = NV_ENGINE(DMAOBJ, 0x04),
179 .ofuncs = &(struct nouveau_ofuncs) {
180 .ctor = nv04_dmaeng_ctor,
181 .dtor = _nouveau_dmaeng_dtor,
182 .init = _nouveau_dmaeng_init,
183 .fini = _nouveau_dmaeng_fini,
184 },
185};
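
For a concrete view of the 16-byte object nv04_dmaobj_bind() fills in, the following standalone sketch recomputes the four words for an assumed VRAM read/write object of class 0x003d; the start/limit values are invented:

#include <stdio.h>
#include <stdint.h>

/* Recompute the words written by nv04_dmaobj_bind() for a VRAM RW
 * object of class 0x003d spanning 0x1234..0x1fff (invented values). */
int main(void)
{
	uint32_t start = 0x00001234, limit = 0x00001fff;
	uint32_t flags0 = 0x003d | 0x00003000; /* class | VRAM target */
	uint32_t flags2 = 0x00000002;          /* RW access */
	uint32_t offset = start & 0xfffff000;
	uint32_t adjust = start & 0x00000fff;

	printf("0x00: %08x\n", (unsigned)(flags0 | (adjust << 20))); /* 2340303d */
	printf("0x04: %08x\n", (unsigned)(limit - start));           /* 00000dcb */
	printf("0x08: %08x\n", (unsigned)(flags2 | offset));         /* 00001002 */
	printf("0x0c: %08x\n", (unsigned)(flags2 | offset));         /* 00001002 */
	return 0;
}
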
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
new file mode 100644
index 000000000000..045d2565e289
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -0,0 +1,173 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26#include <core/class.h>
27
28#include <subdev/fb.h>
29#include <engine/dmaobj.h>
30
31struct nv50_dmaeng_priv {
32 struct nouveau_dmaeng base;
33};
34
35struct nv50_dmaobj_priv {
36 struct nouveau_dmaobj base;
37};
38
39static int
40nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
41 struct nouveau_object *parent,
42 struct nouveau_dmaobj *dmaobj,
43 struct nouveau_gpuobj **pgpuobj)
44{
45 u32 flags = nv_mclass(dmaobj);
46 int ret;
47
48 switch (dmaobj->target) {
49 case NV_MEM_TARGET_VM:
50 flags |= 0x00000000;
51 flags |= 0x60000000; /* COMPRESSION_USEVM */
52 flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
53 break;
54 case NV_MEM_TARGET_VRAM:
55 flags |= 0x00010000;
56 flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
57 break;
58 case NV_MEM_TARGET_PCI:
59 flags |= 0x00020000;
60 flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
61 break;
62 case NV_MEM_TARGET_PCI_NOSNOOP:
63 flags |= 0x00030000;
64 flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
65 break;
66 default:
67 return -EINVAL;
68 }
69
70 switch (dmaobj->access) {
71 case NV_MEM_ACCESS_VM:
72 break;
73 case NV_MEM_ACCESS_RO:
74 flags |= 0x00040000;
75 break;
76 case NV_MEM_ACCESS_WO:
77 case NV_MEM_ACCESS_RW:
78 flags |= 0x00080000;
79 break;
80 }
81
82 ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
83 if (ret == 0) {
84 nv_wo32(*pgpuobj, 0x00, flags);
85 nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
86 nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
87 nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
88 upper_32_bits(dmaobj->start));
89 nv_wo32(*pgpuobj, 0x10, 0x00000000);
90 nv_wo32(*pgpuobj, 0x14, 0x00000000);
91 }
92
93 return ret;
94}
95
96static int
97nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
98 struct nouveau_oclass *oclass, void *data, u32 size,
99 struct nouveau_object **pobject)
100{
101 struct nouveau_dmaeng *dmaeng = (void *)engine;
102 struct nv50_dmaobj_priv *dmaobj;
103 struct nouveau_gpuobj *gpuobj;
104 int ret;
105
106 ret = nouveau_dmaobj_create(parent, engine, oclass,
107 data, size, &dmaobj);
108 *pobject = nv_object(dmaobj);
109 if (ret)
110 return ret;
111
112 switch (nv_mclass(parent)) {
113 case NV_DEVICE_CLASS:
114 break;
115 case NV50_CHANNEL_DMA_CLASS:
116 case NV84_CHANNEL_DMA_CLASS:
117 case NV50_CHANNEL_IND_CLASS:
118 case NV84_CHANNEL_IND_CLASS:
119 ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
120 nouveau_object_ref(NULL, pobject);
121 *pobject = nv_object(gpuobj);
122 break;
123 default:
124 return -EINVAL;
125 }
126
127 return ret;
128}
129
130static struct nouveau_ofuncs
131nv50_dmaobj_ofuncs = {
132 .ctor = nv50_dmaobj_ctor,
133 .dtor = _nouveau_dmaobj_dtor,
134 .init = _nouveau_dmaobj_init,
135 .fini = _nouveau_dmaobj_fini,
136};
137
138static struct nouveau_oclass
139nv50_dmaobj_sclass[] = {
140 { 0x0002, &nv50_dmaobj_ofuncs },
141 { 0x0003, &nv50_dmaobj_ofuncs },
142 { 0x003d, &nv50_dmaobj_ofuncs },
143 {}
144};
145
146static int
147nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
148 struct nouveau_oclass *oclass, void *data, u32 size,
149 struct nouveau_object **pobject)
150{
151 struct nv50_dmaeng_priv *priv;
152 int ret;
153
154 ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
155 *pobject = nv_object(priv);
156 if (ret)
157 return ret;
158
159 priv->base.base.sclass = nv50_dmaobj_sclass;
160 priv->base.bind = nv50_dmaobj_bind;
161 return 0;
162}
163
164struct nouveau_oclass
165nv50_dmaeng_oclass = {
166 .handle = NV_ENGINE(DMAOBJ, 0x50),
167 .ofuncs = &(struct nouveau_ofuncs) {
168 .ctor = nv50_dmaeng_ctor,
169 .dtor = _nouveau_dmaeng_dtor,
170 .init = _nouveau_dmaeng_init,
171 .fini = _nouveau_dmaeng_fini,
172 },
173};
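
The interesting part of the nv50 layout is the 40-bit addressing: the low 32 bits of limit and start each take a whole word, while both sets of high bits share word 0x0c. A standalone sketch with invented addresses:

#include <stdio.h>
#include <stdint.h>

/* High/low split performed by nv50_dmaobj_bind(); addresses invented. */
int main(void)
{
	uint64_t start = 0x0000002345678000ULL;
	uint64_t limit = 0x000000234567ffffULL;

	printf("0x04: %08x\n", (unsigned)limit);  /* lower_32_bits(limit) */
	printf("0x08: %08x\n", (unsigned)start);  /* lower_32_bits(start) */
	printf("0x0c: %08x\n",                    /* 0x23000023 */
	       (unsigned)((limit >> 32) << 24 | (start >> 32)));
	return 0;
}
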
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
new file mode 100644
index 000000000000..5baa08695535
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -0,0 +1,99 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26
27#include <subdev/fb.h>
28#include <engine/dmaobj.h>
29
30struct nvc0_dmaeng_priv {
31 struct nouveau_dmaeng base;
32};
33
34struct nvc0_dmaobj_priv {
35 struct nouveau_dmaobj base;
36};
37
38static int
39nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
40 struct nouveau_oclass *oclass, void *data, u32 size,
41 struct nouveau_object **pobject)
42{
43 struct nvc0_dmaobj_priv *dmaobj;
44 int ret;
45
46 ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
47 *pobject = nv_object(dmaobj);
48 if (ret)
49 return ret;
50
51 if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start)
52 return -EINVAL;
53
54 return 0;
55}
56
57static struct nouveau_ofuncs
58nvc0_dmaobj_ofuncs = {
59 .ctor = nvc0_dmaobj_ctor,
60 .dtor = _nouveau_dmaobj_dtor,
61 .init = _nouveau_dmaobj_init,
62 .fini = _nouveau_dmaobj_fini,
63};
64
65static struct nouveau_oclass
66nvc0_dmaobj_sclass[] = {
67 { 0x0002, &nvc0_dmaobj_ofuncs },
68 { 0x0003, &nvc0_dmaobj_ofuncs },
69 { 0x003d, &nvc0_dmaobj_ofuncs },
70 {}
71};
72
73static int
74nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
75 struct nouveau_oclass *oclass, void *data, u32 size,
76 struct nouveau_object **pobject)
77{
78 struct nvc0_dmaeng_priv *priv;
79 int ret;
80
81 ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
82 *pobject = nv_object(priv);
83 if (ret)
84 return ret;
85
86 priv->base.base.sclass = nvc0_dmaobj_sclass;
87 return 0;
88}
89
90struct nouveau_oclass
91nvc0_dmaeng_oclass = {
92 .handle = NV_ENGINE(DMAOBJ, 0xc0),
93 .ofuncs = &(struct nouveau_ofuncs) {
94 .ctor = nvc0_dmaeng_ctor,
95 .dtor = _nouveau_dmaeng_dtor,
96 .init = _nouveau_dmaeng_init,
97 .fini = _nouveau_dmaeng_fini,
98 },
99};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
new file mode 100644
index 000000000000..bbb43c67c2ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -0,0 +1,181 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/handle.h>
27
28#include <engine/dmaobj.h>
29#include <engine/fifo.h>
30
31int
32nouveau_fifo_channel_create_(struct nouveau_object *parent,
33 struct nouveau_object *engine,
34 struct nouveau_oclass *oclass,
35 int bar, u32 addr, u32 size, u32 pushbuf,
36 u32 engmask, int len, void **ptr)
37{
38 struct nouveau_device *device = nv_device(engine);
39 struct nouveau_fifo *priv = (void *)engine;
40 struct nouveau_fifo_chan *chan;
41 struct nouveau_dmaeng *dmaeng;
42 unsigned long flags;
43 int ret;
44
45 /* create base object class */
46 ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
47 engmask, len, ptr);
48 chan = *ptr;
49 if (ret)
50 return ret;
51
52 /* validate dma object representing push buffer */
53 chan->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
54 if (!chan->pushdma)
55 return -ENOENT;
56
57 dmaeng = (void *)chan->pushdma->base.engine;
58 switch (chan->pushdma->base.oclass->handle) {
59 case 0x0002:
60 case 0x003d:
61 break;
62 default:
63 return -EINVAL;
64 }
65
66 if (dmaeng->bind) {
67 ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
68 if (ret)
69 return ret;
70 }
71
72 /* find a free fifo channel */
73 spin_lock_irqsave(&priv->lock, flags);
74 for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
75 if (!priv->channel[chan->chid]) {
76 priv->channel[chan->chid] = nv_object(chan);
77 break;
78 }
79 }
80 spin_unlock_irqrestore(&priv->lock, flags);
81
82 if (chan->chid == priv->max) {
83 nv_error(priv, "no free channels\n");
84 return -ENOSPC;
85 }
86
87 /* map fifo control registers */
88 chan->user = ioremap(pci_resource_start(device->pdev, bar) + addr +
89 (chan->chid * size), size);
90 if (!chan->user)
91 return -EFAULT;
92
93 chan->size = size;
94 return 0;
95}
96
97void
98nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan)
99{
100 struct nouveau_fifo *priv = (void *)nv_object(chan)->engine;
101 unsigned long flags;
102
103 iounmap(chan->user);
104
105 spin_lock_irqsave(&priv->lock, flags);
106 priv->channel[chan->chid] = NULL;
107 spin_unlock_irqrestore(&priv->lock, flags);
108
109 nouveau_gpuobj_ref(NULL, &chan->pushgpu);
110 nouveau_object_ref(NULL, (struct nouveau_object **)&chan->pushdma);
111 nouveau_namedb_destroy(&chan->base);
112}
113
114void
115_nouveau_fifo_channel_dtor(struct nouveau_object *object)
116{
117 struct nouveau_fifo_chan *chan = (void *)object;
118 nouveau_fifo_channel_destroy(chan);
119}
120
121u32
122_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
123{
124 struct nouveau_fifo_chan *chan = (void *)object;
125 return ioread32_native(chan->user + addr);
126}
127
128void
129_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
130{
131 struct nouveau_fifo_chan *chan = (void *)object;
132 iowrite32_native(data, chan->user + addr);
133}
134
135static int
136nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
137{
138 int engidx = nv_hclass(priv) & 0xff;
139
140 while (object && object->parent) {
141 if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) &&
142 (nv_hclass(object->parent) & 0xff) == engidx)
143 return nouveau_fifo_chan(object)->chid;
144 object = object->parent;
145 }
146
147 return -1;
148}
149
150void
151nouveau_fifo_destroy(struct nouveau_fifo *priv)
152{
153 kfree(priv->channel);
154 nouveau_engine_destroy(&priv->base);
155}
156
157int
158nouveau_fifo_create_(struct nouveau_object *parent,
159 struct nouveau_object *engine,
160 struct nouveau_oclass *oclass,
161 int min, int max, int length, void **pobject)
162{
163 struct nouveau_fifo *priv;
164 int ret;
165
166 ret = nouveau_engine_create_(parent, engine, oclass, true, "PFIFO",
167 "fifo", length, pobject);
168 priv = *pobject;
169 if (ret)
170 return ret;
171
172 priv->min = min;
173 priv->max = max;
174 priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL);
175 if (!priv->channel)
176 return -ENOMEM;
177
178 priv->chid = nouveau_fifo_chid;
179 spin_lock_init(&priv->lock);
180 return 0;
181}
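
Note how the channel-id scan in nouveau_fifo_channel_create_() uses the loop counter itself as its failure signal: falling off the end leaves chan->chid == priv->max, which is tested after the lock is dropped. A standalone sketch of just that pattern (the driver runs the scan under priv->lock with interrupts saved):

#include <stdio.h>

#define MAX_CHAN 4

int main(void)
{
	void *channel[MAX_CHAN] = { (void *)1 }; /* slot 0 taken */
	int min = 0, max = MAX_CHAN, chid;

	for (chid = min; chid < max; chid++) {
		if (!channel[chid]) {
			channel[chid] = (void *)1; /* claim the slot */
			break;
		}
	}

	if (chid == max)
		printf("no free channels\n");
	else
		printf("allocated chid %d\n", chid); /* prints 1 */
	return 0;
}
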
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
new file mode 100644
index 000000000000..ea76e3e8c9c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -0,0 +1,630 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28#include <core/namedb.h>
29#include <core/handle.h>
30#include <core/ramht.h>
31
32#include <subdev/instmem.h>
33#include <subdev/instmem/nv04.h>
34#include <subdev/timer.h>
35#include <subdev/fb.h>
36
37#include <engine/fifo.h>
38
39#include "nv04.h"
40
41static struct ramfc_desc
42nv04_ramfc[] = {
43 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
44 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
45 { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
46 { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
47 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
48 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
49 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
50 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
51 {}
52};
53
54/*******************************************************************************
55 * FIFO channel objects
56 ******************************************************************************/
57
58int
59nv04_fifo_object_attach(struct nouveau_object *parent,
60 struct nouveau_object *object, u32 handle)
61{
62 struct nv04_fifo_priv *priv = (void *)parent->engine;
63 struct nv04_fifo_chan *chan = (void *)parent;
64 u32 context, chid = chan->base.chid;
65 int ret;
66
67 if (nv_iclass(object, NV_GPUOBJ_CLASS))
68 context = nv_gpuobj(object)->addr >> 4;
69 else
70 context = 0x00000004; /* just non-zero */
71
72 switch (nv_engidx(object->engine)) {
73 case NVDEV_ENGINE_DMAOBJ:
74 case NVDEV_ENGINE_SW:
75 context |= 0x00000000;
76 break;
77 case NVDEV_ENGINE_GR:
78 context |= 0x00010000;
79 break;
80 case NVDEV_ENGINE_MPEG:
81 context |= 0x00020000;
82 break;
83 default:
84 return -EINVAL;
85 }
86
87 context |= 0x80000000; /* valid */
88 context |= chid << 24;
89
90 mutex_lock(&nv_subdev(priv)->mutex);
91 ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
92 mutex_unlock(&nv_subdev(priv)->mutex);
93 return ret;
94}
95
96void
97nv04_fifo_object_detach(struct nouveau_object *parent, int cookie)
98{
99 struct nv04_fifo_priv *priv = (void *)parent->engine;
100 mutex_lock(&nv_subdev(priv)->mutex);
101 nouveau_ramht_remove(priv->ramht, cookie);
102 mutex_unlock(&nv_subdev(priv)->mutex);
103}
104
105int
106nv04_fifo_context_attach(struct nouveau_object *parent,
107 struct nouveau_object *object)
108{
109 nv_engctx(object)->addr = nouveau_fifo_chan(parent)->chid;
110 return 0;
111}
112
113static int
114nv04_fifo_chan_ctor(struct nouveau_object *parent,
115 struct nouveau_object *engine,
116 struct nouveau_oclass *oclass, void *data, u32 size,
117 struct nouveau_object **pobject)
118{
119 struct nv04_fifo_priv *priv = (void *)engine;
120 struct nv04_fifo_chan *chan;
121 struct nv03_channel_dma_class *args = data;
122 int ret;
123
124 if (size < sizeof(*args))
125 return -EINVAL;
126
127 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
128 0x10000, args->pushbuf,
129 (1 << NVDEV_ENGINE_DMAOBJ) |
130 (1 << NVDEV_ENGINE_SW) |
131 (1 << NVDEV_ENGINE_GR), &chan);
132 *pobject = nv_object(chan);
133 if (ret)
134 return ret;
135
136 nv_parent(chan)->object_attach = nv04_fifo_object_attach;
137 nv_parent(chan)->object_detach = nv04_fifo_object_detach;
138 nv_parent(chan)->context_attach = nv04_fifo_context_attach;
139 chan->ramfc = chan->base.chid * 32;
140
141 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
142 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
143 nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
144 nv_wo32(priv->ramfc, chan->ramfc + 0x10,
145 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
146 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
147#ifdef __BIG_ENDIAN
148 NV_PFIFO_CACHE1_BIG_ENDIAN |
149#endif
150 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
151 return 0;
152}
153
154void
155nv04_fifo_chan_dtor(struct nouveau_object *object)
156{
157 struct nv04_fifo_priv *priv = (void *)object->engine;
158 struct nv04_fifo_chan *chan = (void *)object;
159 struct ramfc_desc *c = priv->ramfc_desc;
160
161 do {
162 nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
163 } while ((++c)->bits);
164
165 nouveau_fifo_channel_destroy(&chan->base);
166}
167
168int
169nv04_fifo_chan_init(struct nouveau_object *object)
170{
171 struct nv04_fifo_priv *priv = (void *)object->engine;
172 struct nv04_fifo_chan *chan = (void *)object;
173 u32 mask = 1 << chan->base.chid;
174 unsigned long flags;
175 int ret;
176
177 ret = nouveau_fifo_channel_init(&chan->base);
178 if (ret)
179 return ret;
180
181 spin_lock_irqsave(&priv->base.lock, flags);
182 nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
183 spin_unlock_irqrestore(&priv->base.lock, flags);
184 return 0;
185}
186
187int
188nv04_fifo_chan_fini(struct nouveau_object *object, bool suspend)
189{
190 struct nv04_fifo_priv *priv = (void *)object->engine;
191 struct nv04_fifo_chan *chan = (void *)object;
192 struct nouveau_gpuobj *fctx = priv->ramfc;
193 struct ramfc_desc *c;
194 unsigned long flags;
195 u32 data = chan->ramfc;
196 u32 chid;
197
198 /* prevent fifo context switches */
199 spin_lock_irqsave(&priv->base.lock, flags);
200 nv_wr32(priv, NV03_PFIFO_CACHES, 0);
201
202 /* if this channel is active, replace it with a null context */
203 chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
204 if (chid == chan->base.chid) {
205 nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
206 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
207 nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
208
209 c = priv->ramfc_desc;
210 do {
211 u32 rm = ((1ULL << c->bits) - 1) << c->regs;
212 u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
213 u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs;
214 u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
215 nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
216 } while ((++c)->bits);
217
218 c = priv->ramfc_desc;
219 do {
220 nv_wr32(priv, c->regp, 0x00000000);
221 } while ((++c)->bits);
222
223 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
224 nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
225 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
226 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
227 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
228 }
229
230 /* restore normal operation, after disabling dma mode */
231 nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
232 nv_wr32(priv, NV03_PFIFO_CACHES, 1);
233 spin_unlock_irqrestore(&priv->base.lock, flags);
234
235 return nouveau_fifo_channel_fini(&chan->base, suspend);
236}
237
238static struct nouveau_ofuncs
239nv04_fifo_ofuncs = {
240 .ctor = nv04_fifo_chan_ctor,
241 .dtor = nv04_fifo_chan_dtor,
242 .init = nv04_fifo_chan_init,
243 .fini = nv04_fifo_chan_fini,
244 .rd32 = _nouveau_fifo_channel_rd32,
245 .wr32 = _nouveau_fifo_channel_wr32,
246};
247
248static struct nouveau_oclass
249nv04_fifo_sclass[] = {
250 { NV03_CHANNEL_DMA_CLASS, &nv04_fifo_ofuncs },
251 {}
252};
253
254/*******************************************************************************
255 * FIFO context - basically just the instmem reserved for the channel
256 ******************************************************************************/
257
258int
259nv04_fifo_context_ctor(struct nouveau_object *parent,
260 struct nouveau_object *engine,
261 struct nouveau_oclass *oclass, void *data, u32 size,
262 struct nouveau_object **pobject)
263{
264 struct nv04_fifo_base *base;
265 int ret;
266
267 ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
268 0x1000, NVOBJ_FLAG_HEAP, &base);
269 *pobject = nv_object(base);
270 if (ret)
271 return ret;
272
273 return 0;
274}
275
276static struct nouveau_oclass
277nv04_fifo_cclass = {
278 .handle = NV_ENGCTX(FIFO, 0x04),
279 .ofuncs = &(struct nouveau_ofuncs) {
280 .ctor = nv04_fifo_context_ctor,
281 .dtor = _nouveau_fifo_context_dtor,
282 .init = _nouveau_fifo_context_init,
283 .fini = _nouveau_fifo_context_fini,
284 .rd32 = _nouveau_fifo_context_rd32,
285 .wr32 = _nouveau_fifo_context_wr32,
286 },
287};
288
289/*******************************************************************************
290 * PFIFO engine
291 ******************************************************************************/
292
293void
294nv04_fifo_pause(struct nouveau_fifo *pfifo, unsigned long *pflags)
295__acquires(priv->base.lock)
296{
297 struct nv04_fifo_priv *priv = (void *)pfifo;
298 unsigned long flags;
299
300 spin_lock_irqsave(&priv->base.lock, flags);
301 *pflags = flags;
302
303 nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
304 nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
305
306	/* in some cases the puller may be left in an inconsistent state
307	 * if you try to stop it while it's busy translating handles.
308	 * sometimes you get a CACHE_ERROR, sometimes it just fails
309	 * silently, sending incorrect instance offsets to PGRAPH after
310	 * it's started up again.
311	 *
312	 * to avoid this, we invalidate the most recently calculated
313	 * instance.
314	 */
315 if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
316 NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
317 nv_warn(priv, "timeout idling puller\n");
318
319 if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
320 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
321 nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
322
323 nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
324}
325
326void
327nv04_fifo_start(struct nouveau_fifo *pfifo, unsigned long *pflags)
328__releases(priv->base.lock)
329{
330 struct nv04_fifo_priv *priv = (void *)pfifo;
331 unsigned long flags = *pflags;
332
333 nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
334 nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);
335
336 spin_unlock_irqrestore(&priv->base.lock, flags);
337}
338
339static const char *
340nv_dma_state_err(u32 state)
341{
342 static const char * const desc[] = {
343 "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
344 "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
345 };
346 return desc[(state >> 29) & 0x7];
347}
348
349static bool
350nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
351{
352 struct nv04_fifo_chan *chan = NULL;
353 struct nouveau_handle *bind;
354 const int subc = (addr >> 13) & 0x7;
355 const int mthd = addr & 0x1ffc;
356 bool handled = false;
357 unsigned long flags;
358 u32 engine;
359
360 spin_lock_irqsave(&priv->base.lock, flags);
361 if (likely(chid >= priv->base.min && chid <= priv->base.max))
362 chan = (void *)priv->base.channel[chid];
363 if (unlikely(!chan))
364 goto out;
365
366 switch (mthd) {
367 case 0x0000:
368 bind = nouveau_namedb_get(nv_namedb(chan), data);
369 if (unlikely(!bind))
370 break;
371
372 if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
373 engine = 0x0000000f << (subc * 4);
374 chan->subc[subc] = data;
375 handled = true;
376
377 nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
378 }
379
380 nouveau_namedb_put(bind);
381 break;
382 default:
383 engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
384 if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
385 break;
386
387 bind = nouveau_namedb_get(nv_namedb(chan), chan->subc[subc]);
388 if (likely(bind)) {
389 if (!nv_call(bind->object, mthd, data))
390 handled = true;
391 nouveau_namedb_put(bind);
392 }
393 break;
394 }
395
396out:
397 spin_unlock_irqrestore(&priv->base.lock, flags);
398 return handled;
399}
400
401void
402nv04_fifo_intr(struct nouveau_subdev *subdev)
403{
404 struct nouveau_device *device = nv_device(subdev);
405 struct nv04_fifo_priv *priv = (void *)subdev;
406 uint32_t status, reassign;
407 int cnt = 0;
408
409 reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
410 while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
411 uint32_t chid, get;
412
413 nv_wr32(priv, NV03_PFIFO_CACHES, 0);
414
415 chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
416 get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
417
418 if (status & NV_PFIFO_INTR_CACHE_ERROR) {
419 uint32_t mthd, data;
420 int ptr;
421
422			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
423			 * wrapping on my G80 chips, but CACHE1 isn't big
424			 * enough for this much data. Tests show that it
425			 * wraps around to the start at GET=0x800, though
426			 * it's not clear why.
427			 */
428 ptr = (get & 0x7ff) >> 2;
429
430 if (device->card_type < NV_40) {
431 mthd = nv_rd32(priv,
432 NV04_PFIFO_CACHE1_METHOD(ptr));
433 data = nv_rd32(priv,
434 NV04_PFIFO_CACHE1_DATA(ptr));
435 } else {
436 mthd = nv_rd32(priv,
437 NV40_PFIFO_CACHE1_METHOD(ptr));
438 data = nv_rd32(priv,
439 NV40_PFIFO_CACHE1_DATA(ptr));
440 }
441
442 if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
443 nv_info(priv, "CACHE_ERROR - Ch %d/%d "
444 "Mthd 0x%04x Data 0x%08x\n",
445 chid, (mthd >> 13) & 7, mthd & 0x1ffc,
446 data);
447 }
448
449 nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
450 nv_wr32(priv, NV03_PFIFO_INTR_0,
451 NV_PFIFO_INTR_CACHE_ERROR);
452
453 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
454 nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
455 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
456 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
457 nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
458 nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
459
460 nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
461 nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
462 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
463
464 status &= ~NV_PFIFO_INTR_CACHE_ERROR;
465 }
466
467 if (status & NV_PFIFO_INTR_DMA_PUSHER) {
468 u32 dma_get = nv_rd32(priv, 0x003244);
469 u32 dma_put = nv_rd32(priv, 0x003240);
470 u32 push = nv_rd32(priv, 0x003220);
471 u32 state = nv_rd32(priv, 0x003228);
472
473 if (device->card_type == NV_50) {
474 u32 ho_get = nv_rd32(priv, 0x003328);
475 u32 ho_put = nv_rd32(priv, 0x003320);
476 u32 ib_get = nv_rd32(priv, 0x003334);
477 u32 ib_put = nv_rd32(priv, 0x003330);
478
479 nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
480 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
481 "State 0x%08x (err: %s) Push 0x%08x\n",
482 chid, ho_get, dma_get, ho_put,
483 dma_put, ib_get, ib_put, state,
484 nv_dma_state_err(state),
485 push);
486
487 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
488 nv_wr32(priv, 0x003364, 0x00000000);
489 if (dma_get != dma_put || ho_get != ho_put) {
490 nv_wr32(priv, 0x003244, dma_put);
491 nv_wr32(priv, 0x003328, ho_put);
492 } else
493 if (ib_get != ib_put) {
494 nv_wr32(priv, 0x003334, ib_put);
495 }
496 } else {
497 nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
498 "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
499 chid, dma_get, dma_put, state,
500 nv_dma_state_err(state), push);
501
502 if (dma_get != dma_put)
503 nv_wr32(priv, 0x003244, dma_put);
504 }
505
506 nv_wr32(priv, 0x003228, 0x00000000);
507 nv_wr32(priv, 0x003220, 0x00000001);
508 nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
509 status &= ~NV_PFIFO_INTR_DMA_PUSHER;
510 }
511
512 if (status & NV_PFIFO_INTR_SEMAPHORE) {
513 uint32_t sem;
514
515 status &= ~NV_PFIFO_INTR_SEMAPHORE;
516 nv_wr32(priv, NV03_PFIFO_INTR_0,
517 NV_PFIFO_INTR_SEMAPHORE);
518
519 sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
520 nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
521
522 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
523 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
524 }
525
526 if (device->card_type == NV_50) {
527 if (status & 0x00000010) {
528 nv50_fb_trap(nouveau_fb(priv), 1);
529 status &= ~0x00000010;
530 nv_wr32(priv, 0x002100, 0x00000010);
531 }
532 }
533
534 if (status) {
535 nv_info(priv, "unknown intr 0x%08x, ch %d\n",
536 status, chid);
537 nv_wr32(priv, NV03_PFIFO_INTR_0, status);
538 status = 0;
539 }
540
541 nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
542 }
543
544 if (status) {
545 nv_info(priv, "still angry after %d spins, halt\n", cnt);
546 nv_wr32(priv, 0x002140, 0);
547 nv_wr32(priv, 0x000140, 0);
548 }
549
550 nv_wr32(priv, 0x000100, 0x00000100);
551}
552
553static int
554nv04_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
555 struct nouveau_oclass *oclass, void *data, u32 size,
556 struct nouveau_object **pobject)
557{
558 struct nv04_instmem_priv *imem = nv04_instmem(parent);
559 struct nv04_fifo_priv *priv;
560 int ret;
561
562 ret = nouveau_fifo_create(parent, engine, oclass, 0, 15, &priv);
563 *pobject = nv_object(priv);
564 if (ret)
565 return ret;
566
567 nouveau_ramht_ref(imem->ramht, &priv->ramht);
568 nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
569 nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
570
571 nv_subdev(priv)->unit = 0x00000100;
572 nv_subdev(priv)->intr = nv04_fifo_intr;
573 nv_engine(priv)->cclass = &nv04_fifo_cclass;
574 nv_engine(priv)->sclass = nv04_fifo_sclass;
575 priv->base.pause = nv04_fifo_pause;
576 priv->base.start = nv04_fifo_start;
577 priv->ramfc_desc = nv04_ramfc;
578 return 0;
579}
580
581void
582nv04_fifo_dtor(struct nouveau_object *object)
583{
584 struct nv04_fifo_priv *priv = (void *)object;
585 nouveau_gpuobj_ref(NULL, &priv->ramfc);
586 nouveau_gpuobj_ref(NULL, &priv->ramro);
587 nouveau_ramht_ref(NULL, &priv->ramht);
588 nouveau_fifo_destroy(&priv->base);
589}
590
591int
592nv04_fifo_init(struct nouveau_object *object)
593{
594 struct nv04_fifo_priv *priv = (void *)object;
595 int ret;
596
597 ret = nouveau_fifo_init(&priv->base);
598 if (ret)
599 return ret;
600
601 nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
602 nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
603
604 nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
605 ((priv->ramht->bits - 9) << 16) |
606 (priv->ramht->base.addr >> 8));
607 nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
608 nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
609
610 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
611
612 nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
613 nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
614
615 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
616 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
617 nv_wr32(priv, NV03_PFIFO_CACHES, 1);
618 return 0;
619}
620
621struct nouveau_oclass
622nv04_fifo_oclass = {
623 .handle = NV_ENGINE(FIFO, 0x04),
624 .ofuncs = &(struct nouveau_ofuncs) {
625 .ctor = nv04_fifo_ctor,
626 .dtor = nv04_fifo_dtor,
627 .init = nv04_fifo_init,
628 .fini = _nouveau_fifo_fini,
629 },
630};
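
The RAMHT context word assembled by nv04_fifo_object_attach() above packs the instance address, engine selector, channel id and a valid bit into 32 bits; a standalone sketch with invented instance address and channel:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t context = 0x12340 >> 4; /* instance addr in 16-byte units */
	context |= 0x00010000;           /* NVDEV_ENGINE_GR */
	context |= 0x80000000;           /* valid */
	context |= 2 << 24;              /* chid 2 */
	printf("context = 0x%08x\n", (unsigned)context); /* 0x82011234 */
	return 0;
}
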
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
new file mode 100644
index 000000000000..496a4b4fdfaf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
@@ -0,0 +1,178 @@
1#ifndef __NV04_FIFO_H__
2#define __NV04_FIFO_H__
3
4#include <engine/fifo.h>
5
6#define NV04_PFIFO_DELAY_0 0x00002040
7#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
8#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
9#define NV03_PFIFO_INTR_0 0x00002100
10#define NV03_PFIFO_INTR_EN_0 0x00002140
11# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
12# define NV_PFIFO_INTR_RUNOUT (1<<4)
13# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
14# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
15# define NV_PFIFO_INTR_DMA_PT (1<<16)
16# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
17# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
18#define NV03_PFIFO_RAMHT 0x00002210
19#define NV03_PFIFO_RAMFC 0x00002214
20#define NV03_PFIFO_RAMRO 0x00002218
21#define NV40_PFIFO_RAMFC 0x00002220
22#define NV03_PFIFO_CACHES 0x00002500
23#define NV04_PFIFO_MODE 0x00002504
24#define NV04_PFIFO_DMA 0x00002508
25#define NV04_PFIFO_SIZE 0x0000250c
26#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
27#define NV50_PFIFO_CTX_TABLE__SIZE 128
28#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
29#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
30#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
31#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
32#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
33#define NV03_PFIFO_CACHE0_PULL0 0x00003040
34#define NV04_PFIFO_CACHE0_PULL0 0x00003050
35#define NV04_PFIFO_CACHE0_PULL1 0x00003054
36#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
37#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
38#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
39#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
40#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
41#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
42#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
43#define NV03_PFIFO_CACHE1_PUT 0x00003210
44#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
45#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
46# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
47# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
48# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
49# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
50# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
51# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
52# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
53# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
54# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
55# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
56# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
57# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
58# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
59# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
60# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
61# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
62# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
63# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
64# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
65# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
66# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
67# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
68# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
69# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
70# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
71# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
72# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
73# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
74# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
75# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
76# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
77# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
78# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
79# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
80# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
81# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
82# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
83# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
84# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
85# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
86# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
87# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
88# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
89# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
90# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
91# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
92# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
93# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
94# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
95# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
96# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
97# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
98# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
99# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
100# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
101# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
102# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
103# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
104# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
105# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
106# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
107#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
108#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
109#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
110#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
111#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
112#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
113#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
114#define NV03_PFIFO_CACHE1_PULL0 0x00003240
115#define NV04_PFIFO_CACHE1_PULL0 0x00003250
116# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
117# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
118#define NV03_PFIFO_CACHE1_PULL1 0x00003250
119#define NV04_PFIFO_CACHE1_PULL1 0x00003254
120#define NV04_PFIFO_CACHE1_HASH 0x00003258
121#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
122#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
123#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
124#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
125#define NV03_PFIFO_CACHE1_GET 0x00003270
126#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
127#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
128#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
129#define NV40_PFIFO_UNK32E4 0x000032E4
130#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
131#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
132#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
133#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
134
135struct ramfc_desc {
136 unsigned bits:6;
137 unsigned ctxs:5;
138 unsigned ctxp:8;
139 unsigned regs:5;
140 unsigned regp;
141};
142
143struct nv04_fifo_priv {
144 struct nouveau_fifo base;
145 struct ramfc_desc *ramfc_desc;
146 struct nouveau_ramht *ramht;
147 struct nouveau_gpuobj *ramro;
148 struct nouveau_gpuobj *ramfc;
149};
150
151struct nv04_fifo_base {
152 struct nouveau_fifo_base base;
153};
154
155struct nv04_fifo_chan {
156 struct nouveau_fifo_chan base;
157 u32 subc[8];
158 u32 ramfc;
159};
160
161int nv04_fifo_object_attach(struct nouveau_object *,
162 struct nouveau_object *, u32);
163void nv04_fifo_object_detach(struct nouveau_object *, int);
164
165void nv04_fifo_chan_dtor(struct nouveau_object *);
166int nv04_fifo_chan_init(struct nouveau_object *);
167int nv04_fifo_chan_fini(struct nouveau_object *, bool suspend);
168
169int nv04_fifo_context_ctor(struct nouveau_object *, struct nouveau_object *,
170 struct nouveau_oclass *, void *, u32,
171 struct nouveau_object **);
172
173void nv04_fifo_dtor(struct nouveau_object *);
174int nv04_fifo_init(struct nouveau_object *);
175void nv04_fifo_pause(struct nouveau_fifo *, unsigned long *);
176void nv04_fifo_start(struct nouveau_fifo *, unsigned long *);
177
178#endif
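
Each ramfc_desc row above describes one field's location both in the hardware register (regs/regp) and in the channel's RAMFC image (ctxs/ctxp), which is what lets nv04_fifo_chan_fini() save register state with generic mask arithmetic. A standalone sketch of that arithmetic for the { 16, 16, 0x08, 0, ..._DMA_DCOUNT } row from nv04.c (the register value is invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned bits = 16, ctxs = 16, regs = 0;
	uint32_t reg_val = 0x0000beef;               /* pretend nv_rd32() result */
	uint32_t rm = ((1ULL << bits) - 1) << regs;  /* register-side mask */
	uint32_t cm = ((1ULL << bits) - 1) << ctxs;  /* context-side mask */
	uint32_t rv = (reg_val & rm) >> regs;        /* extract field */
	uint32_t ctx = 0x12340000 & ~cm;             /* clear old field */

	printf("ctx word: 0x%08x\n", (unsigned)(ctx | (rv << ctxs))); /* 0xbeef0000 */
	return 0;
}
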
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
new file mode 100644
index 000000000000..4ba75422b89d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -0,0 +1,171 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28#include <core/ramht.h>
29
30#include <subdev/instmem.h>
31#include <subdev/instmem/nv04.h>
32#include <subdev/fb.h>
33
34#include <engine/fifo.h>
35
36#include "nv04.h"
37
38static struct ramfc_desc
39nv10_ramfc[] = {
40 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
41 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
42 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
43 { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
44 { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
45 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
46 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
47 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
48 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
49 {}
50};
51
52/*******************************************************************************
53 * FIFO channel objects
54 ******************************************************************************/
55
56static int
57nv10_fifo_chan_ctor(struct nouveau_object *parent,
58 struct nouveau_object *engine,
59 struct nouveau_oclass *oclass, void *data, u32 size,
60 struct nouveau_object **pobject)
61{
62 struct nv04_fifo_priv *priv = (void *)engine;
63 struct nv04_fifo_chan *chan;
64 struct nv03_channel_dma_class *args = data;
65 int ret;
66
67 if (size < sizeof(*args))
68 return -EINVAL;
69
70 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
71 0x10000, args->pushbuf,
72 (1 << NVDEV_ENGINE_DMAOBJ) |
73 (1 << NVDEV_ENGINE_SW) |
74 (1 << NVDEV_ENGINE_GR), &chan);
75 *pobject = nv_object(chan);
76 if (ret)
77 return ret;
78
79 nv_parent(chan)->object_attach = nv04_fifo_object_attach;
80 nv_parent(chan)->object_detach = nv04_fifo_object_detach;
81 nv_parent(chan)->context_attach = nv04_fifo_context_attach;
82 chan->ramfc = chan->base.chid * 32;
83
84 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
85 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
86 nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
87 nv_wo32(priv->ramfc, chan->ramfc + 0x14,
88 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
89 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
90#ifdef __BIG_ENDIAN
91 NV_PFIFO_CACHE1_BIG_ENDIAN |
92#endif
93 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
94 return 0;
95}
96
97static struct nouveau_ofuncs
98nv10_fifo_ofuncs = {
99 .ctor = nv10_fifo_chan_ctor,
100 .dtor = nv04_fifo_chan_dtor,
101 .init = nv04_fifo_chan_init,
102 .fini = nv04_fifo_chan_fini,
103 .rd32 = _nouveau_fifo_channel_rd32,
104 .wr32 = _nouveau_fifo_channel_wr32,
105};
106
107static struct nouveau_oclass
108nv10_fifo_sclass[] = {
109 { NV10_CHANNEL_DMA_CLASS, &nv10_fifo_ofuncs },
110 {}
111};
112
113/*******************************************************************************
114 * FIFO context - basically just the instmem reserved for the channel
115 ******************************************************************************/
116
117static struct nouveau_oclass
118nv10_fifo_cclass = {
119 .handle = NV_ENGCTX(FIFO, 0x10),
120 .ofuncs = &(struct nouveau_ofuncs) {
121 .ctor = nv04_fifo_context_ctor,
122 .dtor = _nouveau_fifo_context_dtor,
123 .init = _nouveau_fifo_context_init,
124 .fini = _nouveau_fifo_context_fini,
125 .rd32 = _nouveau_fifo_context_rd32,
126 .wr32 = _nouveau_fifo_context_wr32,
127 },
128};
129
130/*******************************************************************************
131 * PFIFO engine
132 ******************************************************************************/
133
134static int
135nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
136 struct nouveau_oclass *oclass, void *data, u32 size,
137 struct nouveau_object **pobject)
138{
139 struct nv04_instmem_priv *imem = nv04_instmem(parent);
140 struct nv04_fifo_priv *priv;
141 int ret;
142
143 ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
144 *pobject = nv_object(priv);
145 if (ret)
146 return ret;
147
148 nouveau_ramht_ref(imem->ramht, &priv->ramht);
149 nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
150 nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
151
152 nv_subdev(priv)->unit = 0x00000100;
153 nv_subdev(priv)->intr = nv04_fifo_intr;
154 nv_engine(priv)->cclass = &nv10_fifo_cclass;
155 nv_engine(priv)->sclass = nv10_fifo_sclass;
156 priv->base.pause = nv04_fifo_pause;
157 priv->base.start = nv04_fifo_start;
158 priv->ramfc_desc = nv10_ramfc;
159 return 0;
160}
161
162struct nouveau_oclass
163nv10_fifo_oclass = {
164 .handle = NV_ENGINE(FIFO, 0x10),
165 .ofuncs = &(struct nouveau_ofuncs) {
166 .ctor = nv10_fifo_ctor,
167 .dtor = nv04_fifo_dtor,
168 .init = nv04_fifo_init,
169 .fini = _nouveau_fifo_fini,
170 },
171};
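One detail worth tracking across the ctors in these files: the RAMFC slot stride grows with the amount of per-channel state each generation saves, 32 bytes here (chan->base.chid * 32), 64 on nv17 and 128 on nv40. A trivial standalone illustration of the addressing:

#include <stdint.h>
#include <stdio.h>

static uint32_t ramfc_offset(uint32_t chid, uint32_t stride)
{
	return chid * stride;	/* byte offset of this channel's slot */
}

int main(void)
{
	printf("nv10 chid 3 -> 0x%03x\n", ramfc_offset(3, 32));
	printf("nv17 chid 3 -> 0x%03x\n", ramfc_offset(3, 64));
	printf("nv40 chid 3 -> 0x%03x\n", ramfc_offset(3, 128));
	return 0;
}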
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
new file mode 100644
index 000000000000..b96e6b0ae2b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -0,0 +1,208 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28#include <core/ramht.h>
29
30#include <subdev/instmem.h>
31#include <subdev/instmem/nv04.h>
32#include <subdev/fb.h>
33
34#include <engine/fifo.h>
35
36#include "nv04.h"
37
38static struct ramfc_desc
39nv17_ramfc[] = {
40 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
41 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
42 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
43 { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
44 { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
45 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
46 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
47 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
48 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
49 { 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
50 { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
51 { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
52 { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
53 { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
54 {}
55};
56
57/*******************************************************************************
58 * FIFO channel objects
59 ******************************************************************************/
60
61static int
62nv17_fifo_chan_ctor(struct nouveau_object *parent,
63 struct nouveau_object *engine,
64 struct nouveau_oclass *oclass, void *data, u32 size,
65 struct nouveau_object **pobject)
66{
67 struct nv04_fifo_priv *priv = (void *)engine;
68 struct nv04_fifo_chan *chan;
69 struct nv03_channel_dma_class *args = data;
70 int ret;
71
72 if (size < sizeof(*args))
73 return -EINVAL;
74
75 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
76 0x10000, args->pushbuf,
77 (1 << NVDEV_ENGINE_DMAOBJ) |
78 (1 << NVDEV_ENGINE_SW) |
79 (1 << NVDEV_ENGINE_GR) |
80 (1 << NVDEV_ENGINE_MPEG), /* NV31- */
81 &chan);
82 *pobject = nv_object(chan);
83 if (ret)
84 return ret;
85
86 nv_parent(chan)->object_attach = nv04_fifo_object_attach;
87 nv_parent(chan)->object_detach = nv04_fifo_object_detach;
88 nv_parent(chan)->context_attach = nv04_fifo_context_attach;
89 chan->ramfc = chan->base.chid * 64;
90
91 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
92 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
93 nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
94 nv_wo32(priv->ramfc, chan->ramfc + 0x14,
95 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
96 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
97#ifdef __BIG_ENDIAN
98 NV_PFIFO_CACHE1_BIG_ENDIAN |
99#endif
100 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
101 return 0;
102}
103
104static struct nouveau_ofuncs
105nv17_fifo_ofuncs = {
106 .ctor = nv17_fifo_chan_ctor,
107 .dtor = nv04_fifo_chan_dtor,
108 .init = nv04_fifo_chan_init,
109 .fini = nv04_fifo_chan_fini,
110 .rd32 = _nouveau_fifo_channel_rd32,
111 .wr32 = _nouveau_fifo_channel_wr32,
112};
113
114static struct nouveau_oclass
115nv17_fifo_sclass[] = {
116 { NV17_CHANNEL_DMA_CLASS, &nv17_fifo_ofuncs },
117 {}
118};
119
120/*******************************************************************************
121 * FIFO context - basically just the instmem reserved for the channel
122 ******************************************************************************/
123
124static struct nouveau_oclass
125nv17_fifo_cclass = {
126 .handle = NV_ENGCTX(FIFO, 0x17),
127 .ofuncs = &(struct nouveau_ofuncs) {
128 .ctor = nv04_fifo_context_ctor,
129 .dtor = _nouveau_fifo_context_dtor,
130 .init = _nouveau_fifo_context_init,
131 .fini = _nouveau_fifo_context_fini,
132 .rd32 = _nouveau_fifo_context_rd32,
133 .wr32 = _nouveau_fifo_context_wr32,
134 },
135};
136
137/*******************************************************************************
138 * PFIFO engine
139 ******************************************************************************/
140
141static int
142nv17_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
143 struct nouveau_oclass *oclass, void *data, u32 size,
144 struct nouveau_object **pobject)
145{
146 struct nv04_instmem_priv *imem = nv04_instmem(parent);
147 struct nv04_fifo_priv *priv;
148 int ret;
149
150 ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
151 *pobject = nv_object(priv);
152 if (ret)
153 return ret;
154
155 nouveau_ramht_ref(imem->ramht, &priv->ramht);
156 nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
157 nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
158
159 nv_subdev(priv)->unit = 0x00000100;
160 nv_subdev(priv)->intr = nv04_fifo_intr;
161 nv_engine(priv)->cclass = &nv17_fifo_cclass;
162 nv_engine(priv)->sclass = nv17_fifo_sclass;
163 priv->base.pause = nv04_fifo_pause;
164 priv->base.start = nv04_fifo_start;
165 priv->ramfc_desc = nv17_ramfc;
166 return 0;
167}
168
169static int
170nv17_fifo_init(struct nouveau_object *object)
171{
172 struct nv04_fifo_priv *priv = (void *)object;
173 int ret;
174
175 ret = nouveau_fifo_init(&priv->base);
176 if (ret)
177 return ret;
178
179 nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
180 nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
181
182 nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
183 ((priv->ramht->bits - 9) << 16) |
184 (priv->ramht->base.addr >> 8));
185 nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
186 nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000);
187
188 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
189
190 nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
191 nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
192
193 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
194 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
195 nv_wr32(priv, NV03_PFIFO_CACHES, 1);
196 return 0;
197}
198
199struct nouveau_oclass
200nv17_fifo_oclass = {
201 .handle = NV_ENGINE(FIFO, 0x17),
202 .ofuncs = &(struct nouveau_ofuncs) {
203 .ctor = nv17_fifo_ctor,
204 .dtor = nv04_fifo_dtor,
205 .init = nv17_fifo_init,
206 .fini = _nouveau_fifo_fini,
207 },
208};
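nv17_fifo_init() above packs three fields into the NV03_PFIFO_RAMHT write: the search mode in the top byte, the hash-table size encoded as (bits - 9), and the RAMHT base address in 256-byte units. A standalone sketch of that encoding; the input values below are made up:

#include <stdint.h>
#include <stdio.h>

static uint32_t
pfifo_ramht_config(uint32_t search, uint32_t bits, uint64_t addr)
{
	return (search << 24) | ((bits - 9) << 16) | (uint32_t)(addr >> 8);
}

int main(void)
{
	/* 0x03 == "search 128", a bits=9 table at address 0x10000 */
	printf("0x%08x\n", pfifo_ramht_config(0x03, 9, 0x10000));
	return 0;
}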
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
new file mode 100644
index 000000000000..559c3b4e1b86
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -0,0 +1,349 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28#include <core/ramht.h>
29
30#include <subdev/instmem.h>
31#include <subdev/instmem/nv04.h>
32#include <subdev/fb.h>
33
34#include <engine/fifo.h>
35
36#include "nv04.h"
37
38static struct ramfc_desc
39nv40_ramfc[] = {
40 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
41 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
42 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
43 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
44 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
45 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
46 { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
47 { 2, 28, 0x18, 28, 0x002058 },
48 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
49 { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
50 { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
51 { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
52 { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
53 { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
54 { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
55 { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
56 { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
57 { 32, 0, 0x40, 0, 0x0032e4 },
58 { 32, 0, 0x44, 0, 0x0032e8 },
59 { 32, 0, 0x4c, 0, 0x002088 },
60 { 32, 0, 0x50, 0, 0x003300 },
61 { 32, 0, 0x54, 0, 0x00330c },
62 {}
63};
64
65/*******************************************************************************
66 * FIFO channel objects
67 ******************************************************************************/
68
69static int
70nv40_fifo_object_attach(struct nouveau_object *parent,
71 struct nouveau_object *object, u32 handle)
72{
73 struct nv04_fifo_priv *priv = (void *)parent->engine;
74 struct nv04_fifo_chan *chan = (void *)parent;
75 u32 context, chid = chan->base.chid;
76 int ret;
77
78 if (nv_iclass(object, NV_GPUOBJ_CLASS))
79 context = nv_gpuobj(object)->addr >> 4;
80 else
81 context = 0x00000004; /* just non-zero */
82
83 switch (nv_engidx(object->engine)) {
84 case NVDEV_ENGINE_DMAOBJ:
85 case NVDEV_ENGINE_SW:
86 context |= 0x00000000;
87 break;
88 case NVDEV_ENGINE_GR:
89 context |= 0x00100000;
90 break;
91 case NVDEV_ENGINE_MPEG:
92 context |= 0x00200000;
93 break;
94 default:
95 return -EINVAL;
96 }
97
98 context |= chid << 23;
99
100 mutex_lock(&nv_subdev(priv)->mutex);
101 ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
102 mutex_unlock(&nv_subdev(priv)->mutex);
103 return ret;
104}
105
106static int
107nv40_fifo_context_attach(struct nouveau_object *parent,
108 struct nouveau_object *engctx)
109{
110 struct nv04_fifo_priv *priv = (void *)parent->engine;
111 struct nv04_fifo_chan *chan = (void *)parent;
112 unsigned long flags;
113 u32 reg, ctx;
114
115 switch (nv_engidx(engctx->engine)) {
116 case NVDEV_ENGINE_SW:
117 return 0;
118 case NVDEV_ENGINE_GR:
119 reg = 0x32e0;
120 ctx = 0x38;
121 break;
122 case NVDEV_ENGINE_MPEG:
123 reg = 0x330c;
124 ctx = 0x54;
125 break;
126 default:
127 return -EINVAL;
128 }
129
130 spin_lock_irqsave(&priv->base.lock, flags);
131 nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
132 nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
133
134 if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
135 nv_wr32(priv, reg, nv_engctx(engctx)->addr);
136 nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
137
138 nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
139 spin_unlock_irqrestore(&priv->base.lock, flags);
140 return 0;
141}
142
143static int
144nv40_fifo_context_detach(struct nouveau_object *parent, bool suspend,
145 struct nouveau_object *engctx)
146{
147 struct nv04_fifo_priv *priv = (void *)parent->engine;
148 struct nv04_fifo_chan *chan = (void *)parent;
149 unsigned long flags;
150 u32 reg, ctx;
151
152 switch (nv_engidx(engctx->engine)) {
153 case NVDEV_ENGINE_SW:
154 return 0;
155 case NVDEV_ENGINE_GR:
156 reg = 0x32e0;
157 ctx = 0x38;
158 break;
159 case NVDEV_ENGINE_MPEG:
160 reg = 0x330c;
161 ctx = 0x54;
162 break;
163 default:
164 return -EINVAL;
165 }
166
167 spin_lock_irqsave(&priv->base.lock, flags);
168 nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
169
170 if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
171 nv_wr32(priv, reg, 0x00000000);
172 nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);
173
174 nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
175 spin_unlock_irqrestore(&priv->base.lock, flags);
176 return 0;
177}
178
179static int
180nv40_fifo_chan_ctor(struct nouveau_object *parent,
181 struct nouveau_object *engine,
182 struct nouveau_oclass *oclass, void *data, u32 size,
183 struct nouveau_object **pobject)
184{
185 struct nv04_fifo_priv *priv = (void *)engine;
186 struct nv04_fifo_chan *chan;
187 struct nv03_channel_dma_class *args = data;
188 int ret;
189
190 if (size < sizeof(*args))
191 return -EINVAL;
192
193 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
194 0x1000, args->pushbuf,
195 (1 << NVDEV_ENGINE_DMAOBJ) |
196 (1 << NVDEV_ENGINE_SW) |
197 (1 << NVDEV_ENGINE_GR) |
198 (1 << NVDEV_ENGINE_MPEG), &chan);
199 *pobject = nv_object(chan);
200 if (ret)
201 return ret;
202
203 nv_parent(chan)->context_attach = nv40_fifo_context_attach;
204 nv_parent(chan)->context_detach = nv40_fifo_context_detach;
205 nv_parent(chan)->object_attach = nv40_fifo_object_attach;
206 nv_parent(chan)->object_detach = nv04_fifo_object_detach;
207 chan->ramfc = chan->base.chid * 128;
208
209 nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
210 nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
211 nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
212 nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
213 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
214 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
215#ifdef __BIG_ENDIAN
216 NV_PFIFO_CACHE1_BIG_ENDIAN |
217#endif
218 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
219 nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
220 return 0;
221}
222
223static struct nouveau_ofuncs
224nv40_fifo_ofuncs = {
225 .ctor = nv40_fifo_chan_ctor,
226 .dtor = nv04_fifo_chan_dtor,
227 .init = nv04_fifo_chan_init,
228 .fini = nv04_fifo_chan_fini,
229 .rd32 = _nouveau_fifo_channel_rd32,
230 .wr32 = _nouveau_fifo_channel_wr32,
231};
232
233static struct nouveau_oclass
234nv40_fifo_sclass[] = {
235 { NV40_CHANNEL_DMA_CLASS, &nv40_fifo_ofuncs },
236 {}
237};
238
239/*******************************************************************************
240 * FIFO context - basically just the instmem reserved for the channel
241 ******************************************************************************/
242
243static struct nouveau_oclass
244nv40_fifo_cclass = {
245 .handle = NV_ENGCTX(FIFO, 0x40),
246 .ofuncs = &(struct nouveau_ofuncs) {
247 .ctor = nv04_fifo_context_ctor,
248 .dtor = _nouveau_fifo_context_dtor,
249 .init = _nouveau_fifo_context_init,
250 .fini = _nouveau_fifo_context_fini,
251 .rd32 = _nouveau_fifo_context_rd32,
252 .wr32 = _nouveau_fifo_context_wr32,
253 },
254};
255
256/*******************************************************************************
257 * PFIFO engine
258 ******************************************************************************/
259
260static int
261nv40_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
262 struct nouveau_oclass *oclass, void *data, u32 size,
263 struct nouveau_object **pobject)
264{
265 struct nv04_instmem_priv *imem = nv04_instmem(parent);
266 struct nv04_fifo_priv *priv;
267 int ret;
268
269 ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
270 *pobject = nv_object(priv);
271 if (ret)
272 return ret;
273
274 nouveau_ramht_ref(imem->ramht, &priv->ramht);
275 nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
276 nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
277
278 nv_subdev(priv)->unit = 0x00000100;
279 nv_subdev(priv)->intr = nv04_fifo_intr;
280 nv_engine(priv)->cclass = &nv40_fifo_cclass;
281 nv_engine(priv)->sclass = nv40_fifo_sclass;
282 priv->base.pause = nv04_fifo_pause;
283 priv->base.start = nv04_fifo_start;
284 priv->ramfc_desc = nv40_ramfc;
285 return 0;
286}
287
288static int
289nv40_fifo_init(struct nouveau_object *object)
290{
291 struct nv04_fifo_priv *priv = (void *)object;
292 struct nouveau_fb *pfb = nouveau_fb(object);
293 int ret;
294
295 ret = nouveau_fifo_init(&priv->base);
296 if (ret)
297 return ret;
298
299 nv_wr32(priv, 0x002040, 0x000000ff);
300 nv_wr32(priv, 0x002044, 0x2101ffff);
301 nv_wr32(priv, 0x002058, 0x00000001);
302
303 nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
304 ((priv->ramht->bits - 9) << 16) |
305 (priv->ramht->base.addr >> 8));
306 nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
307
308 switch (nv_device(priv)->chipset) {
309 case 0x47:
310 case 0x49:
311 case 0x4b:
312 nv_wr32(priv, 0x002230, 0x00000001);
313 case 0x40:
314 case 0x41:
315 case 0x42:
316 case 0x43:
317 case 0x45:
318 case 0x48:
319 nv_wr32(priv, 0x002220, 0x00030002);
320 break;
321 default:
322 nv_wr32(priv, 0x002230, 0x00000000);
323 nv_wr32(priv, 0x002220, ((pfb->ram.size - 512 * 1024 +
324 priv->ramfc->addr) >> 16) |
325 0x00030000);
326 break;
327 }
328
329 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
330
331 nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
332 nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
333
334 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
335 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
336 nv_wr32(priv, NV03_PFIFO_CACHES, 1);
337 return 0;
338}
339
340struct nouveau_oclass
341nv40_fifo_oclass = {
342 .handle = NV_ENGINE(FIFO, 0x40),
343 .ofuncs = &(struct nouveau_ofuncs) {
344 .ctor = nv40_fifo_ctor,
345 .dtor = nv04_fifo_dtor,
346 .init = nv40_fifo_init,
347 .fini = _nouveau_fifo_fini,
348 },
349};
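nv40_fifo_object_attach() above builds the RAMHT context word from the object's instance address (in 16-byte units), an engine selector, and the channel id at bit 23. A standalone sketch of that packing; the enum names are stand-ins for the selector values the switch uses (GR = 0x00100000, MPEG = 0x00200000):

#include <stdint.h>
#include <stdio.h>

enum { ENG_SW_DMAOBJ = 0x0, ENG_GR = 0x1, ENG_MPEG = 0x2 };

static uint32_t
nv40_ramht_context(uint64_t inst, uint32_t eng, uint32_t chid)
{
	return (uint32_t)(inst >> 4) | (eng << 20) | (chid << 23);
}

int main(void)
{
	/* a GR object at instance 0x2000 on channel 5 */
	printf("0x%08x\n", nv40_ramht_context(0x2000, ENG_GR, 5));
	return 0;
}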
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
new file mode 100644
index 000000000000..536e7634a00d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -0,0 +1,502 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/client.h>
26#include <core/engctx.h>
27#include <core/ramht.h>
28#include <core/class.h>
29#include <core/math.h>
30
31#include <subdev/timer.h>
32#include <subdev/bar.h>
33
34#include <engine/dmaobj.h>
35#include <engine/fifo.h>
36
37#include "nv50.h"
38
39/*******************************************************************************
40 * FIFO channel objects
41 ******************************************************************************/
42
43void
44nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
45{
46 struct nouveau_bar *bar = nouveau_bar(priv);
47 struct nouveau_gpuobj *cur;
48 int i, p;
49
50 cur = priv->playlist[priv->cur_playlist];
51 priv->cur_playlist = !priv->cur_playlist;
52
53 for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
54 if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
55 nv_wo32(cur, p++ * 4, i);
56 }
57
58 bar->flush(bar);
59
60 nv_wr32(priv, 0x0032f4, cur->addr >> 12);
61 nv_wr32(priv, 0x0032ec, p);
62 nv_wr32(priv, 0x002500, 0x00000101);
63}
64
65static int
66nv50_fifo_context_attach(struct nouveau_object *parent,
67 struct nouveau_object *object)
68{
69 struct nouveau_bar *bar = nouveau_bar(parent);
70 struct nv50_fifo_base *base = (void *)parent->parent;
71 struct nouveau_gpuobj *ectx = (void *)object;
72 u64 limit = ectx->addr + ectx->size - 1;
73 u64 start = ectx->addr;
74 u32 addr;
75
76 switch (nv_engidx(object->engine)) {
77 case NVDEV_ENGINE_SW : return 0;
78 case NVDEV_ENGINE_GR : addr = 0x0000; break;
79 case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
80 default:
81 return -EINVAL;
82 }
83
84 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
85 nv_wo32(base->eng, addr + 0x00, 0x00190000);
86 nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
87 nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
88 nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
89 upper_32_bits(start));
90 nv_wo32(base->eng, addr + 0x10, 0x00000000);
91 nv_wo32(base->eng, addr + 0x14, 0x00000000);
92 bar->flush(bar);
93 return 0;
94}
95
96static int
97nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
98 struct nouveau_object *object)
99{
100 struct nouveau_bar *bar = nouveau_bar(parent);
101 struct nv50_fifo_priv *priv = (void *)parent->engine;
102 struct nv50_fifo_base *base = (void *)parent->parent;
103 struct nv50_fifo_chan *chan = (void *)parent;
104 u32 addr, me;
105 int ret = 0;
106
107 switch (nv_engidx(object->engine)) {
108 case NVDEV_ENGINE_SW : return 0;
109 case NVDEV_ENGINE_GR : addr = 0x0000; break;
110 case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
111 default:
112 return -EINVAL;
113 }
114
115 nv_wo32(base->eng, addr + 0x00, 0x00000000);
116 nv_wo32(base->eng, addr + 0x04, 0x00000000);
117 nv_wo32(base->eng, addr + 0x08, 0x00000000);
118 nv_wo32(base->eng, addr + 0x0c, 0x00000000);
119 nv_wo32(base->eng, addr + 0x10, 0x00000000);
120 nv_wo32(base->eng, addr + 0x14, 0x00000000);
121 bar->flush(bar);
122
123 /* HW bug workaround:
124 *
125 * PFIFO will hang forever if the connected engines don't report
126 * that they've processed the context switch request.
127 *
128 * In order for the kickoff to work, we need to ensure all the
129 * connected engines are in a state where they can answer.
130 *
131 * Newer chipsets don't seem to suffer from this issue, and there's
132 * also an "ignore these engines" bitmask reg we can use if we hit
133 * the issue there.
131 * Newer chipsets don't seem to suffer from this issue, and there's
132 * also an "ignore these engines" bitmask reg we can use if we hit
133 * the issue there.
134 */
135 me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);
136
137 /* do the kickoff... */
138 nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
139 if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
140 nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
141 if (suspend)
142 ret = -EBUSY;
143 }
144
145 nv_wr32(priv, 0x00b860, me);
146 return ret;
147}
148
149static int
150nv50_fifo_object_attach(struct nouveau_object *parent,
151 struct nouveau_object *object, u32 handle)
152{
153 struct nv50_fifo_chan *chan = (void *)parent;
154 u32 context;
155
156 if (nv_iclass(object, NV_GPUOBJ_CLASS))
157 context = nv_gpuobj(object)->node->offset >> 4;
158 else
159 context = 0x00000004; /* just non-zero */
160
161 switch (nv_engidx(object->engine)) {
162 case NVDEV_ENGINE_DMAOBJ:
163 case NVDEV_ENGINE_SW : context |= 0x00000000; break;
164 case NVDEV_ENGINE_GR : context |= 0x00100000; break;
165 case NVDEV_ENGINE_MPEG : context |= 0x00200000; break;
166 default:
167 return -EINVAL;
168 }
169
170 return nouveau_ramht_insert(chan->ramht, 0, handle, context);
171}
172
173void
174nv50_fifo_object_detach(struct nouveau_object *parent, int cookie)
175{
176 struct nv50_fifo_chan *chan = (void *)parent;
177 nouveau_ramht_remove(chan->ramht, cookie);
178}
179
180static int
181nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
182 struct nouveau_object *engine,
183 struct nouveau_oclass *oclass, void *data, u32 size,
184 struct nouveau_object **pobject)
185{
186 struct nouveau_bar *bar = nouveau_bar(parent);
187 struct nv50_fifo_base *base = (void *)parent;
188 struct nv50_fifo_chan *chan;
189 struct nv03_channel_dma_class *args = data;
190 int ret;
191
192 if (size < sizeof(*args))
193 return -EINVAL;
194
195 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
196 0x2000, args->pushbuf,
197 (1 << NVDEV_ENGINE_DMAOBJ) |
198 (1 << NVDEV_ENGINE_SW) |
199 (1 << NVDEV_ENGINE_GR) |
200 (1 << NVDEV_ENGINE_MPEG), &chan);
201 *pobject = nv_object(chan);
202 if (ret)
203 return ret;
204
205 nv_parent(chan)->context_attach = nv50_fifo_context_attach;
206 nv_parent(chan)->context_detach = nv50_fifo_context_detach;
207 nv_parent(chan)->object_attach = nv50_fifo_object_attach;
208 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
209
210 ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
211 if (ret)
212 return ret;
213
214 nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
215 nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
216 nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
217 nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
218 nv_wo32(base->ramfc, 0x3c, 0x003f6078);
219 nv_wo32(base->ramfc, 0x44, 0x01003fff);
220 nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
221 nv_wo32(base->ramfc, 0x4c, 0xffffffff);
222 nv_wo32(base->ramfc, 0x60, 0x7fffffff);
223 nv_wo32(base->ramfc, 0x78, 0x00000000);
224 nv_wo32(base->ramfc, 0x7c, 0x30000001);
225 nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
226 (4 << 24) /* SEARCH_FULL */ |
227 (chan->ramht->base.node->offset >> 4));
228 bar->flush(bar);
229 return 0;
230}
231
232static int
233nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
234 struct nouveau_object *engine,
235 struct nouveau_oclass *oclass, void *data, u32 size,
236 struct nouveau_object **pobject)
237{
238 struct nv50_channel_ind_class *args = data;
239 struct nouveau_bar *bar = nouveau_bar(parent);
240 struct nv50_fifo_base *base = (void *)parent;
241 struct nv50_fifo_chan *chan;
242 u64 ioffset, ilength;
243 int ret;
244
245 if (size < sizeof(*args))
246 return -EINVAL;
247
248 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
249 0x2000, args->pushbuf,
250 (1 << NVDEV_ENGINE_DMAOBJ) |
251 (1 << NVDEV_ENGINE_SW) |
252 (1 << NVDEV_ENGINE_GR) |
253 (1 << NVDEV_ENGINE_MPEG), &chan);
254 *pobject = nv_object(chan);
255 if (ret)
256 return ret;
257
258 nv_parent(chan)->context_attach = nv50_fifo_context_attach;
259 nv_parent(chan)->context_detach = nv50_fifo_context_detach;
260 nv_parent(chan)->object_attach = nv50_fifo_object_attach;
261 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
262
263 ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
264 if (ret)
265 return ret;
266
267 ioffset = args->ioffset;
268 ilength = log2i(args->ilength / 8);
269
270 nv_wo32(base->ramfc, 0x3c, 0x403f6078);
271 nv_wo32(base->ramfc, 0x44, 0x01003fff);
272 nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
273 nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
274 nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
275 nv_wo32(base->ramfc, 0x60, 0x7fffffff);
276 nv_wo32(base->ramfc, 0x78, 0x00000000);
277 nv_wo32(base->ramfc, 0x7c, 0x30000001);
278 nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
279 (4 << 24) /* SEARCH_FULL */ |
280 (chan->ramht->base.node->offset >> 4));
281 bar->flush(bar);
282 return 0;
283}
284
285void
286nv50_fifo_chan_dtor(struct nouveau_object *object)
287{
288 struct nv50_fifo_chan *chan = (void *)object;
289 nouveau_ramht_ref(NULL, &chan->ramht);
290 nouveau_fifo_channel_destroy(&chan->base);
291}
292
293static int
294nv50_fifo_chan_init(struct nouveau_object *object)
295{
296 struct nv50_fifo_priv *priv = (void *)object->engine;
297 struct nv50_fifo_base *base = (void *)object->parent;
298 struct nv50_fifo_chan *chan = (void *)object;
299 struct nouveau_gpuobj *ramfc = base->ramfc;
300 u32 chid = chan->base.chid;
301 int ret;
302
303 ret = nouveau_fifo_channel_init(&chan->base);
304 if (ret)
305 return ret;
306
307 nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
308 nv50_fifo_playlist_update(priv);
309 return 0;
310}
311
312int
313nv50_fifo_chan_fini(struct nouveau_object *object, bool suspend)
314{
315 struct nv50_fifo_priv *priv = (void *)object->engine;
316 struct nv50_fifo_chan *chan = (void *)object;
317 u32 chid = chan->base.chid;
318
319 /* remove channel from playlist, fifo will unload context */
320 nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
321 nv50_fifo_playlist_update(priv);
322 nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);
323
324 return nouveau_fifo_channel_fini(&chan->base, suspend);
325}
326
327static struct nouveau_ofuncs
328nv50_fifo_ofuncs_dma = {
329 .ctor = nv50_fifo_chan_ctor_dma,
330 .dtor = nv50_fifo_chan_dtor,
331 .init = nv50_fifo_chan_init,
332 .fini = nv50_fifo_chan_fini,
333 .rd32 = _nouveau_fifo_channel_rd32,
334 .wr32 = _nouveau_fifo_channel_wr32,
335};
336
337static struct nouveau_ofuncs
338nv50_fifo_ofuncs_ind = {
339 .ctor = nv50_fifo_chan_ctor_ind,
340 .dtor = nv50_fifo_chan_dtor,
341 .init = nv50_fifo_chan_init,
342 .fini = nv50_fifo_chan_fini,
343 .rd32 = _nouveau_fifo_channel_rd32,
344 .wr32 = _nouveau_fifo_channel_wr32,
345};
346
347static struct nouveau_oclass
348nv50_fifo_sclass[] = {
349 { NV50_CHANNEL_DMA_CLASS, &nv50_fifo_ofuncs_dma },
350 { NV50_CHANNEL_IND_CLASS, &nv50_fifo_ofuncs_ind },
351 {}
352};
353
354/*******************************************************************************
355 * FIFO context - basically just the instmem reserved for the channel
356 ******************************************************************************/
357
358static int
359nv50_fifo_context_ctor(struct nouveau_object *parent,
360 struct nouveau_object *engine,
361 struct nouveau_oclass *oclass, void *data, u32 size,
362 struct nouveau_object **pobject)
363{
364 struct nv50_fifo_base *base;
365 int ret;
366
367 ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
368 0x1000, NVOBJ_FLAG_HEAP, &base);
369 *pobject = nv_object(base);
370 if (ret)
371 return ret;
372
373 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0x1000,
374 NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
375 if (ret)
376 return ret;
377
378 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1200, 0,
379 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
380 if (ret)
381 return ret;
382
383 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0, 0,
384 &base->pgd);
385 if (ret)
386 return ret;
387
388 ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
389 if (ret)
390 return ret;
391
392 return 0;
393}
394
395void
396nv50_fifo_context_dtor(struct nouveau_object *object)
397{
398 struct nv50_fifo_base *base = (void *)object;
399 nouveau_vm_ref(NULL, &base->vm, base->pgd);
400 nouveau_gpuobj_ref(NULL, &base->pgd);
401 nouveau_gpuobj_ref(NULL, &base->eng);
402 nouveau_gpuobj_ref(NULL, &base->ramfc);
403 nouveau_gpuobj_ref(NULL, &base->cache);
404 nouveau_fifo_context_destroy(&base->base);
405}
406
407static struct nouveau_oclass
408nv50_fifo_cclass = {
409 .handle = NV_ENGCTX(FIFO, 0x50),
410 .ofuncs = &(struct nouveau_ofuncs) {
411 .ctor = nv50_fifo_context_ctor,
412 .dtor = nv50_fifo_context_dtor,
413 .init = _nouveau_fifo_context_init,
414 .fini = _nouveau_fifo_context_fini,
415 .rd32 = _nouveau_fifo_context_rd32,
416 .wr32 = _nouveau_fifo_context_wr32,
417 },
418};
419
420/*******************************************************************************
421 * PFIFO engine
422 ******************************************************************************/
423
424static int
425nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
426 struct nouveau_oclass *oclass, void *data, u32 size,
427 struct nouveau_object **pobject)
428{
429 struct nv50_fifo_priv *priv;
430 int ret;
431
432 ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
433 *pobject = nv_object(priv);
434 if (ret)
435 return ret;
436
437 ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
438 &priv->playlist[0]);
439 if (ret)
440 return ret;
441
442 ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
443 &priv->playlist[1]);
444 if (ret)
445 return ret;
446
447 nv_subdev(priv)->unit = 0x00000100;
448 nv_subdev(priv)->intr = nv04_fifo_intr;
449 nv_engine(priv)->cclass = &nv50_fifo_cclass;
450 nv_engine(priv)->sclass = nv50_fifo_sclass;
451 return 0;
452}
453
454void
455nv50_fifo_dtor(struct nouveau_object *object)
456{
457 struct nv50_fifo_priv *priv = (void *)object;
458
459 nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
460 nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
461
462 nouveau_fifo_destroy(&priv->base);
463}
464
465int
466nv50_fifo_init(struct nouveau_object *object)
467{
468 struct nv50_fifo_priv *priv = (void *)object;
469 int ret, i;
470
471 ret = nouveau_fifo_init(&priv->base);
472 if (ret)
473 return ret;
474
475 nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
476 nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
477 nv_wr32(priv, 0x00250c, 0x6f3cfc34);
478 nv_wr32(priv, 0x002044, 0x01003fff);
479
480 nv_wr32(priv, 0x002100, 0xffffffff);
481 nv_wr32(priv, 0x002140, 0xffffffff);
482
483 for (i = 0; i < 128; i++)
484 nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
485 nv50_fifo_playlist_update(priv);
486
487 nv_wr32(priv, 0x003200, 0x00000001);
488 nv_wr32(priv, 0x003250, 0x00000001);
489 nv_wr32(priv, 0x002500, 0x00000001);
490 return 0;
491}
492
493struct nouveau_oclass
494nv50_fifo_oclass = {
495 .handle = NV_ENGINE(FIFO, 0x50),
496 .ofuncs = &(struct nouveau_ofuncs) {
497 .ctor = nv50_fifo_ctor,
498 .dtor = nv50_fifo_dtor,
499 .init = nv50_fifo_init,
500 .fini = _nouveau_fifo_fini,
501 },
502};
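nv50_fifo_playlist_update() above double-buffers the runlist: the next list is rebuilt in the buffer hardware is not currently using, then submitted with the 0x0032f4/0x0032ec/0x002500 writes. A minimal standalone model of that flow, with channel state held in a plain array and the register writes replaced by a printf:

#include <stdint.h>
#include <stdio.h>

#define NR_CHAN 127

static uint32_t playlist[2][NR_CHAN];
static int cur_playlist;

static void playlist_update(const int *chan_active)
{
	uint32_t *cur = playlist[cur_playlist];
	int i, p;

	cur_playlist = !cur_playlist;	/* flip for the next update */

	for (i = 1, p = 0; i < NR_CHAN; i++) {
		if (chan_active[i])
			cur[p++] = i;	/* one 32-bit entry per channel */
	}

	/* stand-in for the 0x0032f4/0x0032ec/0x002500 submit */
	printf("playlist %p: %d entries, kicked\n", (void *)cur, p);
}

int main(void)
{
	int active[NR_CHAN] = { 0 };

	active[3] = active[7] = 1;
	playlist_update(active);
	return 0;
}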
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
new file mode 100644
index 000000000000..3a9ceb315c20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
@@ -0,0 +1,36 @@
1#ifndef __NV50_FIFO_H__
2#define __NV50_FIFO_H__
3
4struct nv50_fifo_priv {
5 struct nouveau_fifo base;
6 struct nouveau_gpuobj *playlist[2];
7 int cur_playlist;
8};
9
10struct nv50_fifo_base {
11 struct nouveau_fifo_base base;
12 struct nouveau_gpuobj *ramfc;
13 struct nouveau_gpuobj *cache;
14 struct nouveau_gpuobj *eng;
15 struct nouveau_gpuobj *pgd;
16 struct nouveau_vm *vm;
17};
18
19struct nv50_fifo_chan {
20 struct nouveau_fifo_chan base;
21 u32 subc[8];
22 struct nouveau_ramht *ramht;
23};
24
25void nv50_fifo_playlist_update(struct nv50_fifo_priv *);
26
27void nv50_fifo_object_detach(struct nouveau_object *, int);
28void nv50_fifo_chan_dtor(struct nouveau_object *);
29int nv50_fifo_chan_fini(struct nouveau_object *, bool);
30
31void nv50_fifo_context_dtor(struct nouveau_object *);
32
33void nv50_fifo_dtor(struct nouveau_object *);
34int nv50_fifo_init(struct nouveau_object *);
35
36#endif
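The (void *) downcasts used throughout these files (e.g. "struct nv50_fifo_chan *chan = (void *)parent") rely on the base structures above being embedded as the first member, so the generic object pointer and the containing structure share an address. A standalone sketch of that pattern with stand-in types:

#include <stdio.h>

struct nouveau_object { const char *name; };
struct nouveau_fifo_chan { struct nouveau_object object; int chid; };
struct nv50_fifo_chan {
	struct nouveau_fifo_chan base;	/* must stay the first member */
	unsigned ramht_bits;
};

int main(void)
{
	struct nv50_fifo_chan chan = { { { "chan" }, 4 }, 9 };
	struct nouveau_object *object = &chan.base.object;

	/* same trick as the driver's "(void *)object" downcasts */
	struct nv50_fifo_chan *down = (void *)object;

	printf("%s: chid %d\n", down->base.object.name, down->base.chid);
	return 0;
}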
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
new file mode 100644
index 000000000000..b4fd26d8f166
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -0,0 +1,420 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/client.h>
27#include <core/engctx.h>
28#include <core/ramht.h>
29#include <core/class.h>
30#include <core/math.h>
31
32#include <subdev/timer.h>
33#include <subdev/bar.h>
34
35#include <engine/dmaobj.h>
36#include <engine/fifo.h>
37
38#include "nv50.h"
39
40/*******************************************************************************
41 * FIFO channel objects
42 ******************************************************************************/
43
44static int
45nv84_fifo_context_attach(struct nouveau_object *parent,
46 struct nouveau_object *object)
47{
48 struct nouveau_bar *bar = nouveau_bar(parent);
49 struct nv50_fifo_base *base = (void *)parent->parent;
50 struct nouveau_gpuobj *ectx = (void *)object;
51 u64 limit = ectx->addr + ectx->size - 1;
52 u64 start = ectx->addr;
53 u32 addr;
54
55 switch (nv_engidx(object->engine)) {
56 case NVDEV_ENGINE_SW : return 0;
57 case NVDEV_ENGINE_GR : addr = 0x0020; break;
58 case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
59 case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
60 case NVDEV_ENGINE_COPY0: addr = 0x00c0; break;
61 default:
62 return -EINVAL;
63 }
64
65 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
66 nv_wo32(base->eng, addr + 0x00, 0x00190000);
67 nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
68 nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
69 nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
70 upper_32_bits(start));
71 nv_wo32(base->eng, addr + 0x10, 0x00000000);
72 nv_wo32(base->eng, addr + 0x14, 0x00000000);
73 bar->flush(bar);
74 return 0;
75}
76
77static int
78nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
79 struct nouveau_object *object)
80{
81 struct nouveau_bar *bar = nouveau_bar(parent);
82 struct nv50_fifo_priv *priv = (void *)parent->engine;
83 struct nv50_fifo_base *base = (void *)parent->parent;
84 struct nv50_fifo_chan *chan = (void *)parent;
85 u32 addr, save, engn;
86 bool done;
87
88 switch (nv_engidx(object->engine)) {
89 case NVDEV_ENGINE_SW : return 0;
90 case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break;
91 case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break;
92 case NVDEV_ENGINE_CRYPT: engn = 4; addr = 0x00a0; break;
93 case NVDEV_ENGINE_COPY0: engn = 2; addr = 0x00c0; break;
94 default:
95 return -EINVAL;
96 }
97
98 nv_wo32(base->eng, addr + 0x00, 0x00000000);
99 nv_wo32(base->eng, addr + 0x04, 0x00000000);
100 nv_wo32(base->eng, addr + 0x08, 0x00000000);
101 nv_wo32(base->eng, addr + 0x0c, 0x00000000);
102 nv_wo32(base->eng, addr + 0x10, 0x00000000);
103 nv_wo32(base->eng, addr + 0x14, 0x00000000);
104 bar->flush(bar);
105
106 save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
107 nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
108 done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
109 nv_wr32(priv, 0x002520, save);
110 if (!done) {
111 nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
112 if (suspend)
113 return -EBUSY;
114 }
115 return 0;
116}
117
118static int
119nv84_fifo_object_attach(struct nouveau_object *parent,
120 struct nouveau_object *object, u32 handle)
121{
122 struct nv50_fifo_chan *chan = (void *)parent;
123 u32 context;
124
125 if (nv_iclass(object, NV_GPUOBJ_CLASS))
126 context = nv_gpuobj(object)->node->offset >> 4;
127 else
128 context = 0x00000004; /* just non-zero */
129
130 switch (nv_engidx(object->engine)) {
131 case NVDEV_ENGINE_DMAOBJ:
132 case NVDEV_ENGINE_SW : context |= 0x00000000; break;
133 case NVDEV_ENGINE_GR : context |= 0x00100000; break;
134 case NVDEV_ENGINE_MPEG :
135 case NVDEV_ENGINE_PPP : context |= 0x00200000; break;
136 case NVDEV_ENGINE_ME :
137 case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break;
138 case NVDEV_ENGINE_VP : context |= 0x00400000; break;
139 case NVDEV_ENGINE_CRYPT :
140 case NVDEV_ENGINE_UNK1C1: context |= 0x00500000; break;
141 case NVDEV_ENGINE_BSP : context |= 0x00600000; break;
142 default:
143 return -EINVAL;
144 }
145
146 return nouveau_ramht_insert(chan->ramht, 0, handle, context);
147}
148
149static int
150nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
151 struct nouveau_object *engine,
152 struct nouveau_oclass *oclass, void *data, u32 size,
153 struct nouveau_object **pobject)
154{
155 struct nouveau_bar *bar = nouveau_bar(parent);
156 struct nv50_fifo_base *base = (void *)parent;
157 struct nv50_fifo_chan *chan;
158 struct nv03_channel_dma_class *args = data;
159 int ret;
160
161 if (size < sizeof(*args))
162 return -EINVAL;
163
164 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
165 0x2000, args->pushbuf,
166 (1 << NVDEV_ENGINE_DMAOBJ) |
167 (1 << NVDEV_ENGINE_SW) |
168 (1 << NVDEV_ENGINE_GR) |
169 (1 << NVDEV_ENGINE_MPEG) |
170 (1 << NVDEV_ENGINE_ME) |
171 (1 << NVDEV_ENGINE_VP) |
172 (1 << NVDEV_ENGINE_CRYPT) |
173 (1 << NVDEV_ENGINE_BSP) |
174 (1 << NVDEV_ENGINE_PPP) |
175 (1 << NVDEV_ENGINE_COPY0) |
176 (1 << NVDEV_ENGINE_UNK1C1), &chan);
177 *pobject = nv_object(chan);
178 if (ret)
179 return ret;
180
181 ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
182 if (ret)
183 return ret;
184
185 nv_parent(chan)->context_attach = nv84_fifo_context_attach;
186 nv_parent(chan)->context_detach = nv84_fifo_context_detach;
187 nv_parent(chan)->object_attach = nv84_fifo_object_attach;
188 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
189
190 nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
191 nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
192 nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
193 nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
194 nv_wo32(base->ramfc, 0x3c, 0x003f6078);
195 nv_wo32(base->ramfc, 0x44, 0x01003fff);
196 nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
197 nv_wo32(base->ramfc, 0x4c, 0xffffffff);
198 nv_wo32(base->ramfc, 0x60, 0x7fffffff);
199 nv_wo32(base->ramfc, 0x78, 0x00000000);
200 nv_wo32(base->ramfc, 0x7c, 0x30000001);
201 nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
202 (4 << 24) /* SEARCH_FULL */ |
203 (chan->ramht->base.node->offset >> 4));
204 nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
205 nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
206 bar->flush(bar);
207 return 0;
208}
209
210static int
211nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
212 struct nouveau_object *engine,
213 struct nouveau_oclass *oclass, void *data, u32 size,
214 struct nouveau_object **pobject)
215{
216 struct nouveau_bar *bar = nouveau_bar(parent);
217 struct nv50_fifo_base *base = (void *)parent;
218 struct nv50_fifo_chan *chan;
219 struct nv50_channel_ind_class *args = data;
220 u64 ioffset, ilength;
221 int ret;
222
223 if (size < sizeof(*args))
224 return -EINVAL;
225
226 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
227 0x2000, args->pushbuf,
228 (1 << NVDEV_ENGINE_DMAOBJ) |
229 (1 << NVDEV_ENGINE_SW) |
230 (1 << NVDEV_ENGINE_GR) |
231 (1 << NVDEV_ENGINE_MPEG) |
232 (1 << NVDEV_ENGINE_ME) |
233 (1 << NVDEV_ENGINE_VP) |
234 (1 << NVDEV_ENGINE_CRYPT) |
235 (1 << NVDEV_ENGINE_BSP) |
236 (1 << NVDEV_ENGINE_PPP) |
237 (1 << NVDEV_ENGINE_COPY0) |
238 (1 << NVDEV_ENGINE_UNK1C1), &chan);
239 *pobject = nv_object(chan);
240 if (ret)
241 return ret;
242
243 ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
244 if (ret)
245 return ret;
246
247 nv_parent(chan)->context_attach = nv84_fifo_context_attach;
248 nv_parent(chan)->context_detach = nv84_fifo_context_detach;
249 nv_parent(chan)->object_attach = nv84_fifo_object_attach;
250 nv_parent(chan)->object_detach = nv50_fifo_object_detach;
251
252 ioffset = args->ioffset;
253 ilength = log2i(args->ilength / 8);
254
255 nv_wo32(base->ramfc, 0x3c, 0x403f6078);
256 nv_wo32(base->ramfc, 0x44, 0x01003fff);
257 nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
258 nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
259 nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
260 nv_wo32(base->ramfc, 0x60, 0x7fffffff);
261 nv_wo32(base->ramfc, 0x78, 0x00000000);
262 nv_wo32(base->ramfc, 0x7c, 0x30000001);
263 nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
264 (4 << 24) /* SEARCH_FULL */ |
265 (chan->ramht->base.node->offset >> 4));
266 nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
267 nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
268 bar->flush(bar);
269 return 0;
270}
271
272static int
273nv84_fifo_chan_init(struct nouveau_object *object)
274{
275 struct nv50_fifo_priv *priv = (void *)object->engine;
276 struct nv50_fifo_base *base = (void *)object->parent;
277 struct nv50_fifo_chan *chan = (void *)object;
278 struct nouveau_gpuobj *ramfc = base->ramfc;
279 u32 chid = chan->base.chid;
280 int ret;
281
282 ret = nouveau_fifo_channel_init(&chan->base);
283 if (ret)
284 return ret;
285
286 nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
287 nv50_fifo_playlist_update(priv);
288 return 0;
289}
290
291static struct nouveau_ofuncs
292nv84_fifo_ofuncs_dma = {
293 .ctor = nv84_fifo_chan_ctor_dma,
294 .dtor = nv50_fifo_chan_dtor,
295 .init = nv84_fifo_chan_init,
296 .fini = nv50_fifo_chan_fini,
297 .rd32 = _nouveau_fifo_channel_rd32,
298 .wr32 = _nouveau_fifo_channel_wr32,
299};
300
301static struct nouveau_ofuncs
302nv84_fifo_ofuncs_ind = {
303 .ctor = nv84_fifo_chan_ctor_ind,
304 .dtor = nv50_fifo_chan_dtor,
305 .init = nv84_fifo_chan_init,
306 .fini = nv50_fifo_chan_fini,
307 .rd32 = _nouveau_fifo_channel_rd32,
308 .wr32 = _nouveau_fifo_channel_wr32,
309};
310
311static struct nouveau_oclass
312nv84_fifo_sclass[] = {
313 { NV84_CHANNEL_DMA_CLASS, &nv84_fifo_ofuncs_dma },
314 { NV84_CHANNEL_IND_CLASS, &nv84_fifo_ofuncs_ind },
315 {}
316};
317
318/*******************************************************************************
319 * FIFO context - basically just the instmem reserved for the channel
320 ******************************************************************************/
321
322static int
323nv84_fifo_context_ctor(struct nouveau_object *parent,
324 struct nouveau_object *engine,
325 struct nouveau_oclass *oclass, void *data, u32 size,
326 struct nouveau_object **pobject)
327{
328 struct nv50_fifo_base *base;
329 int ret;
330
331 ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
332 0x1000, NVOBJ_FLAG_HEAP, &base);
333 *pobject = nv_object(base);
334 if (ret)
335 return ret;
336
337 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0,
338 NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
339 if (ret)
340 return ret;
341
342 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0,
343 0, &base->pgd);
344 if (ret)
345 return ret;
346
347 ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
348 if (ret)
349 return ret;
350
351 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1000, 0x400,
352 NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
353 if (ret)
354 return ret;
355
356 ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0100, 0x100,
357 NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
358 if (ret)
359 return ret;
360
361 return 0;
362}
363
364static struct nouveau_oclass
365nv84_fifo_cclass = {
366 .handle = NV_ENGCTX(FIFO, 0x84),
367 .ofuncs = &(struct nouveau_ofuncs) {
368 .ctor = nv84_fifo_context_ctor,
369 .dtor = nv50_fifo_context_dtor,
370 .init = _nouveau_fifo_context_init,
371 .fini = _nouveau_fifo_context_fini,
372 .rd32 = _nouveau_fifo_context_rd32,
373 .wr32 = _nouveau_fifo_context_wr32,
374 },
375};
376
377/*******************************************************************************
378 * PFIFO engine
379 ******************************************************************************/
380
381static int
382nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
383 struct nouveau_oclass *oclass, void *data, u32 size,
384 struct nouveau_object **pobject)
385{
386 struct nv50_fifo_priv *priv;
387 int ret;
388
389 ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
390 *pobject = nv_object(priv);
391 if (ret)
392 return ret;
393
394 ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
395 &priv->playlist[0]);
396 if (ret)
397 return ret;
398
399 ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
400 &priv->playlist[1]);
401 if (ret)
402 return ret;
403
404 nv_subdev(priv)->unit = 0x00000100;
405 nv_subdev(priv)->intr = nv04_fifo_intr;
406 nv_engine(priv)->cclass = &nv84_fifo_cclass;
407 nv_engine(priv)->sclass = nv84_fifo_sclass;
408 return 0;
409}
410
411struct nouveau_oclass
412nv84_fifo_oclass = {
413 .handle = NV_ENGINE(FIFO, 0x84),
414 .ofuncs = &(struct nouveau_ofuncs) {
415 .ctor = nv84_fifo_ctor,
416 .dtor = nv50_fifo_dtor,
417 .init = nv50_fifo_init,
418 .fini = _nouveau_fifo_fini,
419 },
420};
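The detach path in nv84_fifo_context_detach() above leans on nv_mask() returning the register's previous value: the 0x002520 engine mask is narrowed to the one engine being kicked, and the saved value is written back verbatim once the wait completes. A standalone model of that save/restore idiom, with the register file replaced by an array so it runs anywhere:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[0x3000];

static uint32_t rd32(uint32_t addr) { return regs[addr]; }
static void wr32(uint32_t addr, uint32_t data) { regs[addr] = data; }

/* like nv_mask(): read-modify-write that returns the previous value */
static uint32_t
mask32(uint32_t addr, uint32_t mask, uint32_t data)
{
	uint32_t temp = rd32(addr);
	wr32(addr, (temp & ~mask) | data);
	return temp;
}

int main(void)
{
	uint32_t save;

	wr32(0x2520, 0x0000002a);		/* whatever was there before */
	save = mask32(0x2520, 0x3f, 1 << 2);	/* isolate engine 2 */
	printf("during kick: 0x%08x\n", rd32(0x2520));

	/* ... the kick via 0x0032fc and the wait happen here ... */

	wr32(0x2520, save);			/* restore the old mask */
	printf("restored:    0x%08x\n", rd32(0x2520));
	return 0;
}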
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
new file mode 100644
index 000000000000..6f21be600557
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -0,0 +1,647 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/client.h>
26#include <core/handle.h>
27#include <core/namedb.h>
28#include <core/gpuobj.h>
29#include <core/engctx.h>
30#include <core/class.h>
31#include <core/math.h>
32#include <core/enum.h>
33
34#include <subdev/timer.h>
35#include <subdev/bar.h>
36#include <subdev/vm.h>
37
38#include <engine/dmaobj.h>
39#include <engine/fifo.h>
40
41struct nvc0_fifo_priv {
42 struct nouveau_fifo base;
43 struct nouveau_gpuobj *playlist[2];
44 int cur_playlist;
45 struct {
46 struct nouveau_gpuobj *mem;
47 struct nouveau_vma bar;
48 } user;
49 int spoon_nr;
50};
51
52struct nvc0_fifo_base {
53 struct nouveau_fifo_base base;
54 struct nouveau_gpuobj *pgd;
55 struct nouveau_vm *vm;
56};
57
58struct nvc0_fifo_chan {
59 struct nouveau_fifo_chan base;
60};
61
62/*******************************************************************************
63 * FIFO channel objects
64 ******************************************************************************/
65
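/* Note versus nv50_fifo_playlist_update(): playlist entries here are
 * two words each (channel id, then 0x00000004), so "p" counts bytes
 * and the entry count written to 0x002274 is p >> 3; the update is
 * also synchronous, polling 0x00227c for completion. */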
66static void
67nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
68{
69 struct nouveau_bar *bar = nouveau_bar(priv);
70 struct nouveau_gpuobj *cur;
71 int i, p;
72
73 cur = priv->playlist[priv->cur_playlist];
74 priv->cur_playlist = !priv->cur_playlist;
75
76 for (i = 0, p = 0; i < 128; i++) {
77 if (!(nv_rd32(priv, 0x003004 + (i * 8)) & 1))
78 continue;
79 nv_wo32(cur, p + 0, i);
80 nv_wo32(cur, p + 4, 0x00000004);
81 p += 8;
82 }
83 bar->flush(bar);
84
85 nv_wr32(priv, 0x002270, cur->addr >> 12);
86 nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
87 if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000))
88 nv_error(priv, "playlist update failed\n");
89}
90
91static int
92nvc0_fifo_context_attach(struct nouveau_object *parent,
93 struct nouveau_object *object)
94{
95 struct nouveau_bar *bar = nouveau_bar(parent);
96 struct nvc0_fifo_base *base = (void *)parent->parent;
97 struct nouveau_engctx *ectx = (void *)object;
98 u32 addr;
99 int ret;
100
101 switch (nv_engidx(object->engine)) {
102 case NVDEV_ENGINE_SW : return 0;
103 case NVDEV_ENGINE_GR : addr = 0x0210; break;
104 case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
105 case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
106 default:
107 return -EINVAL;
108 }
109
110 if (!ectx->vma.node) {
111 ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
112 NV_MEM_ACCESS_RW, &ectx->vma);
113 if (ret)
114 return ret;
115
116 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
117 }
118
119 nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
120 nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
121 bar->flush(bar);
122 return 0;
123}
124
125static int
126nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
127 struct nouveau_object *object)
128{
129 struct nouveau_bar *bar = nouveau_bar(parent);
130 struct nvc0_fifo_priv *priv = (void *)parent->engine;
131 struct nvc0_fifo_base *base = (void *)parent->parent;
132 struct nvc0_fifo_chan *chan = (void *)parent;
133 u32 addr;
134
135 switch (nv_engidx(object->engine)) {
136 case NVDEV_ENGINE_SW : return 0;
137 case NVDEV_ENGINE_GR : addr = 0x0210; break;
138 case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
139 case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
140 default:
141 return -EINVAL;
142 }
143
144 nv_wo32(base, addr + 0x00, 0x00000000);
145 nv_wo32(base, addr + 0x04, 0x00000000);
146 bar->flush(bar);
147
148 nv_wr32(priv, 0x002634, chan->base.chid);
149 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
150 nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
151 if (suspend)
152 return -EBUSY;
153 }
154
155 return 0;
156}
157
158static int
159nvc0_fifo_chan_ctor(struct nouveau_object *parent,
160 struct nouveau_object *engine,
161 struct nouveau_oclass *oclass, void *data, u32 size,
162 struct nouveau_object **pobject)
163{
164 struct nouveau_bar *bar = nouveau_bar(parent);
165 struct nvc0_fifo_priv *priv = (void *)engine;
166 struct nvc0_fifo_base *base = (void *)parent;
167 struct nvc0_fifo_chan *chan;
168 struct nv50_channel_ind_class *args = data;
169 u64 usermem, ioffset, ilength;
170 int ret, i;
171
172 if (size < sizeof(*args))
173 return -EINVAL;
174
175 ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
176 priv->user.bar.offset, 0x1000,
177 args->pushbuf,
178 (1 << NVDEV_ENGINE_SW) |
179 (1 << NVDEV_ENGINE_GR) |
180 (1 << NVDEV_ENGINE_COPY0) |
181 (1 << NVDEV_ENGINE_COPY1), &chan);
182 *pobject = nv_object(chan);
183 if (ret)
184 return ret;
185
186 nv_parent(chan)->context_attach = nvc0_fifo_context_attach;
187 nv_parent(chan)->context_detach = nvc0_fifo_context_detach;
188
189 usermem = chan->base.chid * 0x1000;
190 ioffset = args->ioffset;
191 ilength = log2i(args->ilength / 8);
192
193 for (i = 0; i < 0x1000; i += 4)
194 nv_wo32(priv->user.mem, usermem + i, 0x00000000);
195
196 nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
197 nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
198 nv_wo32(base, 0x10, 0x0000face);
199 nv_wo32(base, 0x30, 0xfffff902);
200 nv_wo32(base, 0x48, lower_32_bits(ioffset));
201 nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
202 nv_wo32(base, 0x54, 0x00000002);
203 nv_wo32(base, 0x84, 0x20400000);
204 nv_wo32(base, 0x94, 0x30000001);
205 nv_wo32(base, 0x9c, 0x00000100);
206 nv_wo32(base, 0xa4, 0x1f1f1f1f);
207 nv_wo32(base, 0xa8, 0x1f1f1f1f);
208 nv_wo32(base, 0xac, 0x0000001f);
209 nv_wo32(base, 0xb8, 0xf8000000);
210 nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
211 nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
212 bar->flush(bar);
213 return 0;
214}
215
216static int
217nvc0_fifo_chan_init(struct nouveau_object *object)
218{
219 struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
220 struct nvc0_fifo_priv *priv = (void *)object->engine;
221 struct nvc0_fifo_chan *chan = (void *)object;
222 u32 chid = chan->base.chid;
223 int ret;
224
225 ret = nouveau_fifo_channel_init(&chan->base);
226 if (ret)
227 return ret;
228
229 nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
230 nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
231 nvc0_fifo_playlist_update(priv);
232 return 0;
233}
234
235static int
236nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
237{
238 struct nvc0_fifo_priv *priv = (void *)object->engine;
239 struct nvc0_fifo_chan *chan = (void *)object;
240 u32 chid = chan->base.chid;
241
242 nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
243 nvc0_fifo_playlist_update(priv);
244 nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
245
246 return nouveau_fifo_channel_fini(&chan->base, suspend);
247}
248
249static struct nouveau_ofuncs
250nvc0_fifo_ofuncs = {
251 .ctor = nvc0_fifo_chan_ctor,
252 .dtor = _nouveau_fifo_channel_dtor,
253 .init = nvc0_fifo_chan_init,
254 .fini = nvc0_fifo_chan_fini,
255 .rd32 = _nouveau_fifo_channel_rd32,
256 .wr32 = _nouveau_fifo_channel_wr32,
257};
258
259static struct nouveau_oclass
260nvc0_fifo_sclass[] = {
261 { NVC0_CHANNEL_IND_CLASS, &nvc0_fifo_ofuncs },
262 {}
263};
264
265/*******************************************************************************
266 * FIFO context - instmem heap and vm setup
267 ******************************************************************************/
268
269static int
270nvc0_fifo_context_ctor(struct nouveau_object *parent,
271 struct nouveau_object *engine,
272 struct nouveau_oclass *oclass, void *data, u32 size,
273 struct nouveau_object **pobject)
274{
275 struct nvc0_fifo_base *base;
276 int ret;
277
278 ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
279 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
280 NVOBJ_FLAG_HEAP, &base);
281 *pobject = nv_object(base);
282 if (ret)
283 return ret;
284
285 ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
286 if (ret)
287 return ret;
288
289 nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
290 nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
291 nv_wo32(base, 0x0208, 0xffffffff);
292 nv_wo32(base, 0x020c, 0x000000ff);
293
294 ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
295 if (ret)
296 return ret;
297
298 return 0;
299}
300
301static void
302nvc0_fifo_context_dtor(struct nouveau_object *object)
303{
304 struct nvc0_fifo_base *base = (void *)object;
305 nouveau_vm_ref(NULL, &base->vm, base->pgd);
306 nouveau_gpuobj_ref(NULL, &base->pgd);
307 nouveau_fifo_context_destroy(&base->base);
308}
309
310static struct nouveau_oclass
311nvc0_fifo_cclass = {
312 .handle = NV_ENGCTX(FIFO, 0xc0),
313 .ofuncs = &(struct nouveau_ofuncs) {
314 .ctor = nvc0_fifo_context_ctor,
315 .dtor = nvc0_fifo_context_dtor,
316 .init = _nouveau_fifo_context_init,
317 .fini = _nouveau_fifo_context_fini,
318 .rd32 = _nouveau_fifo_context_rd32,
319 .wr32 = _nouveau_fifo_context_wr32,
320 },
321};
322
323/*******************************************************************************
324 * PFIFO engine
325 ******************************************************************************/
326
327static const struct nouveau_enum nvc0_fifo_fault_unit[] = {
328 { 0x00, "PGRAPH" },
329 { 0x03, "PEEPHOLE" },
330 { 0x04, "BAR1" },
331 { 0x05, "BAR3" },
332 { 0x07, "PFIFO" },
333 { 0x10, "PBSP" },
334 { 0x11, "PPPP" },
335 { 0x13, "PCOUNTER" },
336 { 0x14, "PVP" },
337 { 0x15, "PCOPY0" },
338 { 0x16, "PCOPY1" },
339 { 0x17, "PDAEMON" },
340 {}
341};
342
343static const struct nouveau_enum nvc0_fifo_fault_reason[] = {
344 { 0x00, "PT_NOT_PRESENT" },
345 { 0x01, "PT_TOO_SHORT" },
346 { 0x02, "PAGE_NOT_PRESENT" },
347 { 0x03, "VM_LIMIT_EXCEEDED" },
348 { 0x04, "NO_CHANNEL" },
349 { 0x05, "PAGE_SYSTEM_ONLY" },
350 { 0x06, "PAGE_READ_ONLY" },
351 { 0x0a, "COMPRESSED_SYSRAM" },
352 { 0x0c, "INVALID_STORAGE_TYPE" },
353 {}
354};
355
356static const struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
357 { 0x01, "PCOPY0" },
358 { 0x02, "PCOPY1" },
359 { 0x04, "DISPATCH" },
360 { 0x05, "CTXCTL" },
361 { 0x06, "PFIFO" },
362 { 0x07, "BAR_READ" },
363 { 0x08, "BAR_WRITE" },
364 { 0x0b, "PVP" },
365 { 0x0c, "PPPP" },
366 { 0x0d, "PBSP" },
367 { 0x11, "PCOUNTER" },
368 { 0x12, "PDAEMON" },
369 { 0x14, "CCACHE" },
370 { 0x15, "CCACHE_POST" },
371 {}
372};
373
374static const struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
375 { 0x01, "TEX" },
376 { 0x0c, "ESETUP" },
377 { 0x0e, "CTXCTL" },
378 { 0x0f, "PROP" },
379 {}
380};
381
382static const struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
383/* { 0x00008000, "" } seen with null ib push */
384 { 0x00200000, "ILLEGAL_MTHD" },
385 { 0x00800000, "EMPTY_SUBC" },
386 {}
387};
388
389static void
390nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
391{
392 u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10));
393 u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10));
394 u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
395 u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
396 u32 client = (stat & 0x00001f00) >> 8;
397
398 switch (unit) {
399 case 3: /* PEEPHOLE */
400 nv_mask(priv, 0x001718, 0x00000000, 0x00000000);
401 break;
402 case 4: /* BAR1 */
403 nv_mask(priv, 0x001704, 0x00000000, 0x00000000);
404 break;
405 case 5: /* BAR3 */
406 nv_mask(priv, 0x001714, 0x00000000, 0x00000000);
407 break;
408 default:
409 break;
410 }
411
412 nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ?
413 "write" : "read", (u64)vahi << 32 | valo);
414 nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
415 printk("] from ");
416 nouveau_enum_print(nvc0_fifo_fault_unit, unit);
417 if (stat & 0x00000040) {
418 printk("/");
419 nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
420 } else {
421 printk("/GPC%d/", (stat & 0x1f000000) >> 24);
422 nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
423 }
424 printk(" on channel 0x%010llx\n", (u64)inst << 12);
425}
426
427static int
428nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
429{
430 struct nvc0_fifo_chan *chan = NULL;
431 struct nouveau_handle *bind;
432 unsigned long flags;
433 int ret = -EINVAL;
434
435 spin_lock_irqsave(&priv->base.lock, flags);
436 if (likely(chid >= priv->base.min && chid <= priv->base.max))
437 chan = (void *)priv->base.channel[chid];
438 if (unlikely(!chan))
439 goto out;
440
441 bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
442 if (likely(bind)) {
443 if (!mthd || !nv_call(bind->object, mthd, data))
444 ret = 0;
445 nouveau_namedb_put(bind);
446 }
447
448out:
449 spin_unlock_irqrestore(&priv->base.lock, flags);
450 return ret;
451}
452
453static void
454nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
455{
456 u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
457 u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
458 u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
459 u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f;
460 u32 subc = (addr & 0x00070000) >> 16;
461 u32 mthd = (addr & 0x00003ffc);
462 u32 show = stat;
463
464 if (stat & 0x00200000) {
465 if (mthd == 0x0054) {
466 if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
467 show &= ~0x00200000;
468 }
469 }
470
471 if (stat & 0x00800000) {
472 if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
473 show &= ~0x00800000;
474 }
475
476 if (show) {
477 nv_error(priv, "SUBFIFO%d:", unit);
478 nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
479 printk("\n");
480 nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
481 "data 0x%08x\n",
482 unit, chid, subc, mthd, data);
483 }
484
485 nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
486 nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
487}
488
489static void
490nvc0_fifo_intr(struct nouveau_subdev *subdev)
491{
492 struct nvc0_fifo_priv *priv = (void *)subdev;
493 u32 mask = nv_rd32(priv, 0x002140);
494 u32 stat = nv_rd32(priv, 0x002100) & mask;
495
496 if (stat & 0x00000100) {
497 nv_info(priv, "unknown status 0x00000100\n");
498 nv_wr32(priv, 0x002100, 0x00000100);
499 stat &= ~0x00000100;
500 }
501
502 if (stat & 0x10000000) {
503 u32 units = nv_rd32(priv, 0x00259c);
504 u32 u = units;
505
506 while (u) {
507 int i = ffs(u) - 1;
508 nvc0_fifo_isr_vm_fault(priv, i);
509 u &= ~(1 << i);
510 }
511
512 nv_wr32(priv, 0x00259c, units);
513 stat &= ~0x10000000;
514 }
515
516 if (stat & 0x20000000) {
517 u32 units = nv_rd32(priv, 0x0025a0);
518 u32 u = units;
519
520 while (u) {
521 int i = ffs(u) - 1;
522 nvc0_fifo_isr_subfifo_intr(priv, i);
523 u &= ~(1 << i);
524 }
525
526 nv_wr32(priv, 0x0025a0, units);
527 stat &= ~0x20000000;
528 }
529
530 if (stat & 0x40000000) {
531 nv_warn(priv, "unknown status 0x40000000\n");
532 nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
533 stat &= ~0x40000000;
534 }
535
536 if (stat) {
537 nv_fatal(priv, "unhandled status 0x%08x\n", stat);
538 nv_wr32(priv, 0x002100, stat);
539 nv_wr32(priv, 0x002140, 0);
540 }
541}
542
543static int
544nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
545 struct nouveau_oclass *oclass, void *data, u32 size,
546 struct nouveau_object **pobject)
547{
548 struct nvc0_fifo_priv *priv;
549 int ret;
550
551 ret = nouveau_fifo_create(parent, engine, oclass, 0, 127, &priv);
552 *pobject = nv_object(priv);
553 if (ret)
554 return ret;
555
556 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
557 &priv->playlist[0]);
558 if (ret)
559 return ret;
560
561 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
562 &priv->playlist[1]);
563 if (ret)
564 return ret;
565
566 ret = nouveau_gpuobj_new(parent, NULL, 128 * 0x1000, 0x1000, 0,
567 &priv->user.mem);
568 if (ret)
569 return ret;
570
571 ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
572 &priv->user.bar);
573 if (ret)
574 return ret;
575
576 nv_subdev(priv)->unit = 0x00000100;
577 nv_subdev(priv)->intr = nvc0_fifo_intr;
578 nv_engine(priv)->cclass = &nvc0_fifo_cclass;
579 nv_engine(priv)->sclass = nvc0_fifo_sclass;
580 return 0;
581}
582
583static void
584nvc0_fifo_dtor(struct nouveau_object *object)
585{
586 struct nvc0_fifo_priv *priv = (void *)object;
587
588 nouveau_gpuobj_unmap(&priv->user.bar);
589 nouveau_gpuobj_ref(NULL, &priv->user.mem);
590 nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
591 nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
592
593 nouveau_fifo_destroy(&priv->base);
594}
595
596static int
597nvc0_fifo_init(struct nouveau_object *object)
598{
599 struct nvc0_fifo_priv *priv = (void *)object;
600 int ret, i;
601
602 ret = nouveau_fifo_init(&priv->base);
603 if (ret)
604 return ret;
605
606 nv_wr32(priv, 0x000204, 0xffffffff);
607 nv_wr32(priv, 0x002204, 0xffffffff);
608
609 priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
610 nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
611
612 /* assign engines to subfifos */
613 if (priv->spoon_nr >= 3) {
614 nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
615 nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
616 nv_wr32(priv, 0x002210, ~(1 << 1)); /* PPP */
617 nv_wr32(priv, 0x002214, ~(1 << 1)); /* PBSP */
618 nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
619 nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
620 }
621
622 /* PSUBFIFO[n] */
623 for (i = 0; i < priv->spoon_nr; i++) {
624 nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
625 nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
626 nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
627 }
628
629 nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
630 nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
631
632 nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
633 nv_wr32(priv, 0x002100, 0xffffffff);
634 nv_wr32(priv, 0x002140, 0xbfffffff);
635 return 0;
636}
637
638struct nouveau_oclass
639nvc0_fifo_oclass = {
640 .handle = NV_ENGINE(FIFO, 0xc0),
641 .ofuncs = &(struct nouveau_ofuncs) {
642 .ctor = nvc0_fifo_ctor,
643 .dtor = nvc0_fifo_dtor,
644 .init = nvc0_fifo_init,
645 .fini = _nouveau_fifo_fini,
646 },
647};
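The playlist handling above is the piece of this file that actually drives channel scheduling: each update rebuilds the list of runnable channels into the idle half of a double buffer, flips, and then points PFIFO at the fresh copy. The following is a minimal userspace sketch of that same pattern, with MMIO mocked as plain arrays; NR_CHAN, the array layout, and main() are illustrative, while the enable-bit test and the 8-byte entry format mirror nvc0_fifo_playlist_update() in the diff.

#include <stdint.h>
#include <stdio.h>

#define NR_CHAN 128

static uint32_t chan_ctrl[NR_CHAN];       /* stands in for 0x003004 + i*8 */
static uint32_t playlist[2][NR_CHAN * 2]; /* one 8-byte entry per channel */
static int cur_playlist;

static int playlist_update(void)
{
	uint32_t *cur = playlist[cur_playlist];
	int i, p;

	cur_playlist = !cur_playlist;     /* next update hits the other copy */

	for (i = 0, p = 0; i < NR_CHAN; i++) {
		if (!(chan_ctrl[i] & 1))  /* skip channels not enabled */
			continue;
		cur[p + 0] = i;           /* word 0: channel id */
		cur[p + 1] = 0x00000004;  /* word 1: entry flags, as above */
		p += 2;
	}
	/* the driver then flushes BAR writes and programs 0x002270/0x002274 */
	return p / 2;                     /* number of entries written */
}

int main(void)
{
	chan_ctrl[3] = chan_ctrl[7] = 1;  /* pretend two channels are active */
	printf("playlist holds %d entries\n", playlist_update());
	return 0;
}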
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
new file mode 100644
index 000000000000..36e81b6fafbc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -0,0 +1,628 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engctx.h>
+#include <core/class.h>
+#include <core/math.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+#define _(a,b) { (a), ((1 << (a)) | (b)) }
+static const struct {
+	int subdev;
+	u32 mask;
+} fifo_engine[] = {
+	_(NVDEV_ENGINE_GR    , (1 << NVDEV_ENGINE_SW)),
+	_(NVDEV_ENGINE_VP    , 0),
+	_(NVDEV_ENGINE_PPP   , 0),
+	_(NVDEV_ENGINE_BSP   , 0),
+	_(NVDEV_ENGINE_COPY0 , 0),
+	_(NVDEV_ENGINE_COPY1 , 0),
+	_(NVDEV_ENGINE_VENC  , 0),
+};
+#undef _
+#define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
+
+struct nve0_fifo_engn {
+	struct nouveau_gpuobj *playlist[2];
+	int cur_playlist;
+};
+
+struct nve0_fifo_priv {
+	struct nouveau_fifo base;
+	struct nve0_fifo_engn engine[FIFO_ENGINE_NR];
+	struct {
+		struct nouveau_gpuobj *mem;
+		struct nouveau_vma bar;
+	} user;
+	int spoon_nr;
+};
+
+struct nve0_fifo_base {
+	struct nouveau_fifo_base base;
+	struct nouveau_gpuobj *pgd;
+	struct nouveau_vm *vm;
+};
+
+struct nve0_fifo_chan {
+	struct nouveau_fifo_chan base;
+	u32 engine;
+};
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
+static void
+nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
+{
+	struct nouveau_bar *bar = nouveau_bar(priv);
+	struct nve0_fifo_engn *engn = &priv->engine[engine];
+	struct nouveau_gpuobj *cur;
+	u32 match = (engine << 16) | 0x00000001;
+	int i, p;
+
+	cur = engn->playlist[engn->cur_playlist];
+	if (unlikely(cur == NULL)) {
+		int ret = nouveau_gpuobj_new(nv_object(priv)->parent, NULL,
+					     0x8000, 0x1000, 0, &cur);
+		if (ret) {
+			nv_error(priv, "playlist alloc failed\n");
+			return;
+		}
+
+		engn->playlist[engn->cur_playlist] = cur;
+	}
+
+	engn->cur_playlist = !engn->cur_playlist;
+
+	for (i = 0, p = 0; i < priv->base.max; i++) {
+		u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
+		if (ctrl != match)
+			continue;
+		nv_wo32(cur, p + 0, i);
+		nv_wo32(cur, p + 4, 0x00000000);
+		p += 8;
+	}
+	bar->flush(bar);
+
+	nv_wr32(priv, 0x002270, cur->addr >> 12);
+	nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
+	if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
+		nv_error(priv, "playlist %d update timeout\n", engine);
+}
+
+static int
+nve0_fifo_context_attach(struct nouveau_object *parent,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nve0_fifo_base *base = (void *)parent->parent;
+	struct nouveau_engctx *ectx = (void *)object;
+	u32 addr;
+	int ret;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   :
+	case NVDEV_ENGINE_COPY0:
+	case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!ectx->vma.node) {
+		ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
+					    NV_MEM_ACCESS_RW, &ectx->vma);
+		if (ret)
+			return ret;
+
+		nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
+	}
+
+	nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
+	nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+			 struct nouveau_object *object)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nve0_fifo_priv *priv = (void *)parent->engine;
+	struct nve0_fifo_base *base = (void *)parent->parent;
+	struct nve0_fifo_chan *chan = (void *)parent;
+	u32 addr;
+
+	switch (nv_engidx(object->engine)) {
+	case NVDEV_ENGINE_SW   : return 0;
+	case NVDEV_ENGINE_GR   :
+	case NVDEV_ENGINE_COPY0:
+	case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
+	default:
+		return -EINVAL;
+	}
+
+	nv_wo32(base, addr + 0x00, 0x00000000);
+	nv_wo32(base, addr + 0x04, 0x00000000);
+	bar->flush(bar);
+
+	nv_wr32(priv, 0x002634, chan->base.chid);
+	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+		nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+		if (suspend)
+			return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+nve0_fifo_chan_ctor(struct nouveau_object *parent,
+		    struct nouveau_object *engine,
+		    struct nouveau_oclass *oclass, void *data, u32 size,
+		    struct nouveau_object **pobject)
+{
+	struct nouveau_bar *bar = nouveau_bar(parent);
+	struct nve0_fifo_priv *priv = (void *)engine;
+	struct nve0_fifo_base *base = (void *)parent;
+	struct nve0_fifo_chan *chan;
+	struct nve0_channel_ind_class *args = data;
+	u64 usermem, ioffset, ilength;
+	int ret, i;
+
+	if (size < sizeof(*args))
+		return -EINVAL;
+
+	for (i = 0; i < FIFO_ENGINE_NR; i++) {
+		if (args->engine & (1 << i)) {
+			if (nouveau_engine(parent, fifo_engine[i].subdev)) {
+				args->engine = (1 << i);
+				break;
+			}
+		}
+	}
+
+	if (i == FIFO_ENGINE_NR)
+		return -ENODEV;
+
+	ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
+					  priv->user.bar.offset, 0x200,
+					  args->pushbuf,
+					  fifo_engine[i].mask, &chan);
+	*pobject = nv_object(chan);
+	if (ret)
+		return ret;
+
+	nv_parent(chan)->context_attach = nve0_fifo_context_attach;
+	nv_parent(chan)->context_detach = nve0_fifo_context_detach;
+	chan->engine = i;
+
+	usermem = chan->base.chid * 0x200;
+	ioffset = args->ioffset;
+	ilength = log2i(args->ilength / 8);
+
+	for (i = 0; i < 0x200; i += 4)
+		nv_wo32(priv->user.mem, usermem + i, 0x00000000);
+
+	nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
+	nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
+	nv_wo32(base, 0x10, 0x0000face);
+	nv_wo32(base, 0x30, 0xfffff902);
+	nv_wo32(base, 0x48, lower_32_bits(ioffset));
+	nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
+	nv_wo32(base, 0x84, 0x20400000);
+	nv_wo32(base, 0x94, 0x30000001);
+	nv_wo32(base, 0x9c, 0x00000100);
+	nv_wo32(base, 0xac, 0x0000001f);
+	nv_wo32(base, 0xe8, chan->base.chid);
+	nv_wo32(base, 0xb8, 0xf8000000);
+	nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
+	nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
+	bar->flush(bar);
+	return 0;
+}
+
+static int
+nve0_fifo_chan_init(struct nouveau_object *object)
+{
+	struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
+	struct nve0_fifo_priv *priv = (void *)object->engine;
+	struct nve0_fifo_chan *chan = (void *)object;
+	u32 chid = chan->base.chid;
+	int ret;
+
+	ret = nouveau_fifo_channel_init(&chan->base);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16);
+	nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
+	nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+	nve0_fifo_playlist_update(priv, chan->engine);
+	nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+	return 0;
+}
+
+static int
+nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+	struct nve0_fifo_priv *priv = (void *)object->engine;
+	struct nve0_fifo_chan *chan = (void *)object;
+	u32 chid = chan->base.chid;
+
+	nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
+	nve0_fifo_playlist_update(priv, chan->engine);
+	nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
+
+	return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
+
+static struct nouveau_ofuncs
+nve0_fifo_ofuncs = {
+	.ctor = nve0_fifo_chan_ctor,
+	.dtor = _nouveau_fifo_channel_dtor,
+	.init = nve0_fifo_chan_init,
+	.fini = nve0_fifo_chan_fini,
+	.rd32 = _nouveau_fifo_channel_rd32,
+	.wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nve0_fifo_sclass[] = {
+	{ NVE0_CHANNEL_IND_CLASS, &nve0_fifo_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * FIFO context - instmem heap and vm setup
+ ******************************************************************************/
+
+static int
+nve0_fifo_context_ctor(struct nouveau_object *parent,
+		       struct nouveau_object *engine,
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
+{
+	struct nve0_fifo_base *base;
+	int ret;
+
+	ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+					  0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
+	*pobject = nv_object(base);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
+	if (ret)
+		return ret;
+
+	nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
+	nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
+	nv_wo32(base, 0x0208, 0xffffffff);
+	nv_wo32(base, 0x020c, 0x000000ff);
+
+	ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void
+nve0_fifo_context_dtor(struct nouveau_object *object)
+{
+	struct nve0_fifo_base *base = (void *)object;
+	nouveau_vm_ref(NULL, &base->vm, base->pgd);
+	nouveau_gpuobj_ref(NULL, &base->pgd);
+	nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nve0_fifo_cclass = {
+	.handle = NV_ENGCTX(FIFO, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_fifo_context_ctor,
+		.dtor = nve0_fifo_context_dtor,
+		.init = _nouveau_fifo_context_init,
+		.fini = _nouveau_fifo_context_fini,
+		.rd32 = _nouveau_fifo_context_rd32,
+		.wr32 = _nouveau_fifo_context_wr32,
+	},
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static const struct nouveau_enum nve0_fifo_fault_unit[] = {
+	{}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_reason[] = {
+	{ 0x00, "PT_NOT_PRESENT" },
+	{ 0x01, "PT_TOO_SHORT" },
+	{ 0x02, "PAGE_NOT_PRESENT" },
+	{ 0x03, "VM_LIMIT_EXCEEDED" },
+	{ 0x04, "NO_CHANNEL" },
+	{ 0x05, "PAGE_SYSTEM_ONLY" },
+	{ 0x06, "PAGE_READ_ONLY" },
+	{ 0x0a, "COMPRESSED_SYSRAM" },
+	{ 0x0c, "INVALID_STORAGE_TYPE" },
+	{}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_hubclient[] = {
+	{}
+};
+
+static const struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
+	{}
+};
+
+static const struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
+	{ 0x00200000, "ILLEGAL_MTHD" },
+	{ 0x00800000, "EMPTY_SUBC" },
+	{}
+};
+
+static void
+nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
+{
+	u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
+	u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
+	u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
+	u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
+	u32 client = (stat & 0x00001f00) >> 8;
+
+	nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
+		 "write" : "read", (u64)vahi << 32 | valo);
+	nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
+	printk("] from ");
+	nouveau_enum_print(nve0_fifo_fault_unit, unit);
+	if (stat & 0x00000040) {
+		printk("/");
+		nouveau_enum_print(nve0_fifo_fault_hubclient, client);
+	} else {
+		printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+		nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
+	}
+	printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
+static int
+nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
+{
+	struct nve0_fifo_chan *chan = NULL;
+	struct nouveau_handle *bind;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&priv->base.lock, flags);
+	if (likely(chid >= priv->base.min && chid <= priv->base.max))
+		chan = (void *)priv->base.channel[chid];
+	if (unlikely(!chan))
+		goto out;
+
+	bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
+	if (likely(bind)) {
+		if (!mthd || !nv_call(bind->object, mthd, data))
+			ret = 0;
+		nouveau_namedb_put(bind);
+	}
+
+out:
+	spin_unlock_irqrestore(&priv->base.lock, flags);
+	return ret;
+}
+
+static void
+nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
+{
+	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
+	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
+	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
+	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00003ffc);
+	u32 show = stat;
+
+	if (stat & 0x00200000) {
+		if (mthd == 0x0054) {
+			if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
+				show &= ~0x00200000;
+		}
+	}
+
+	if (stat & 0x00800000) {
+		if (!nve0_fifo_swmthd(priv, chid, mthd, data))
+			show &= ~0x00800000;
+	}
+
+	if (show) {
+		nv_error(priv, "SUBFIFO%d:", unit);
+		nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
+		printk("\n");
+		nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
+			       "data 0x%08x\n",
+			 unit, chid, subc, mthd, data);
+	}
+
+	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
+	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nve0_fifo_intr(struct nouveau_subdev *subdev)
+{
+	struct nve0_fifo_priv *priv = (void *)subdev;
+	u32 mask = nv_rd32(priv, 0x002140);
+	u32 stat = nv_rd32(priv, 0x002100) & mask;
+
+	if (stat & 0x00000100) {
+		nv_warn(priv, "unknown status 0x00000100\n");
+		nv_wr32(priv, 0x002100, 0x00000100);
+		stat &= ~0x00000100;
+	}
+
+	if (stat & 0x10000000) {
+		u32 units = nv_rd32(priv, 0x00259c);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nve0_fifo_isr_vm_fault(priv, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(priv, 0x00259c, units);
+		stat &= ~0x10000000;
+	}
+
+	if (stat & 0x20000000) {
+		u32 units = nv_rd32(priv, 0x0025a0);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nve0_fifo_isr_subfifo_intr(priv, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(priv, 0x0025a0, units);
+		stat &= ~0x20000000;
+	}
+
+	if (stat & 0x40000000) {
+		nv_warn(priv, "unknown status 0x40000000\n");
+		nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
+		stat &= ~0x40000000;
+	}
+
+	if (stat) {
+		nv_fatal(priv, "unhandled status 0x%08x\n", stat);
+		nv_wr32(priv, 0x002100, stat);
+		nv_wr32(priv, 0x002140, 0);
+	}
+}
+
+static int
+nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	       struct nouveau_oclass *oclass, void *data, u32 size,
+	       struct nouveau_object **pobject)
+{
+	struct nve0_fifo_priv *priv;
+	int ret;
+
+	ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(parent, NULL, 4096 * 0x200, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+				 &priv->user.bar);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->unit = 0x00000100;
+	nv_subdev(priv)->intr = nve0_fifo_intr;
+	nv_engine(priv)->cclass = &nve0_fifo_cclass;
+	nv_engine(priv)->sclass = nve0_fifo_sclass;
+	return 0;
+}
+
+static void
+nve0_fifo_dtor(struct nouveau_object *object)
+{
+	struct nve0_fifo_priv *priv = (void *)object;
+	int i;
+
+	nouveau_gpuobj_unmap(&priv->user.bar);
+	nouveau_gpuobj_ref(NULL, &priv->user.mem);
+
+	for (i = 0; i < ARRAY_SIZE(priv->engine); i++) {
+		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
+		nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
+	}
+
+	nouveau_fifo_destroy(&priv->base);
+}
+
+static int
+nve0_fifo_init(struct nouveau_object *object)
+{
+	struct nve0_fifo_priv *priv = (void *)object;
+	int ret, i;
+
+	ret = nouveau_fifo_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* enable all available PSUBFIFOs */
+	nv_wr32(priv, 0x000204, 0xffffffff);
+	priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
+	nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
+
+	/* PSUBFIFO[n] */
+	for (i = 0; i < priv->spoon_nr; i++) {
+		nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+	}
+
+	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+	nv_wr32(priv, 0x002a00, 0xffffffff);
+	nv_wr32(priv, 0x002100, 0xffffffff);
+	nv_wr32(priv, 0x002140, 0xbfffffff);
+	return 0;
+}
+
+struct nouveau_oclass
+nve0_fifo_oclass = {
+	.handle = NV_ENGINE(FIFO, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_fifo_ctor,
+		.dtor = nve0_fifo_dtor,
+		.init = nve0_fifo_init,
+		.fini = _nouveau_fifo_fini,
+	},
+};
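The biggest structural change from nvc0 is that the nve0 code keeps one playlist per engine, and a channel is bound to a single engine chosen at creation time: the loop at the top of nve0_fifo_chan_ctor() picks the first requested engine that actually exists and takes its context mask from fifo_engine[]. Below is a self-contained sketch of just that selection step; engine presence is faked with a bitmap, and the enum values stand in for the NVDEV_ENGINE_* constants.

#include <stdint.h>
#include <stdio.h>

enum { ENG_SW, ENG_GR, ENG_VP, ENG_PPP, ENG_BSP,
       ENG_COPY0, ENG_COPY1, ENG_VENC };

/* mirrors fifo_engine[]: each engine's mask includes its own bit via _() */
static const struct { int subdev; uint32_t mask; } fifo_engine[] = {
	{ ENG_GR,    (1 << ENG_GR) | (1 << ENG_SW) },
	{ ENG_VP,    (1 << ENG_VP)    },
	{ ENG_PPP,   (1 << ENG_PPP)   },
	{ ENG_BSP,   (1 << ENG_BSP)   },
	{ ENG_COPY0, (1 << ENG_COPY0) },
	{ ENG_COPY1, (1 << ENG_COPY1) },
	{ ENG_VENC,  (1 << ENG_VENC)  },
};
#define FIFO_ENGINE_NR (sizeof(fifo_engine) / sizeof(fifo_engine[0]))

static uint32_t present = 0xff & ~(1 << ENG_VENC); /* pretend VENC is absent */

static int pick_engine(uint32_t requested, uint32_t *ctx_mask)
{
	unsigned int i;

	for (i = 0; i < FIFO_ENGINE_NR; i++) {
		if ((requested & (1 << i)) &&
		    (present & (1 << fifo_engine[i].subdev))) {
			*ctx_mask = fifo_engine[i].mask;
			return i; /* runlist/engine index for the channel */
		}
	}
	return -1; /* the driver returns -ENODEV here */
}

int main(void)
{
	uint32_t mask;
	int engine = pick_engine(1 << 0, &mask); /* ask for GR */
	printf("engine %d, context mask 0x%02x\n", engine, (unsigned)mask);
	return 0;
}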
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
index b0795ececbda..e1947013d3bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
@@ -2,7 +2,7 @@
 #define __NOUVEAU_GRCTX_H__
 
 struct nouveau_grctx {
-	struct drm_device *dev;
+	struct nouveau_device *device;
 
 	enum {
 		NOUVEAU_GRCTX_PROG,
@@ -10,18 +10,18 @@ struct nouveau_grctx {
 	} mode;
 	void *data;
 
-	uint32_t ctxprog_max;
-	uint32_t ctxprog_len;
-	uint32_t ctxprog_reg;
+	u32 ctxprog_max;
+	u32 ctxprog_len;
+	u32 ctxprog_reg;
 	int ctxprog_label[32];
-	uint32_t ctxvals_pos;
-	uint32_t ctxvals_base;
+	u32 ctxvals_pos;
+	u32 ctxvals_base;
 };
 
 static inline void
-cp_out(struct nouveau_grctx *ctx, uint32_t inst)
+cp_out(struct nouveau_grctx *ctx, u32 inst)
 {
-	uint32_t *ctxprog = ctx->data;
+	u32 *ctxprog = ctx->data;
 
 	if (ctx->mode != NOUVEAU_GRCTX_PROG)
 		return;
@@ -31,13 +31,13 @@ cp_out(struct nouveau_grctx *ctx, uint32_t inst)
 }
 
 static inline void
-cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
+cp_lsr(struct nouveau_grctx *ctx, u32 val)
 {
 	cp_out(ctx, CP_LOAD_SR | val);
 }
 
 static inline void
-cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
+cp_ctx(struct nouveau_grctx *ctx, u32 reg, u32 length)
 {
 	ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
 
@@ -55,7 +55,7 @@ cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
 static inline void
 cp_name(struct nouveau_grctx *ctx, int name)
 {
-	uint32_t *ctxprog = ctx->data;
+	u32 *ctxprog = ctx->data;
 	int i;
 
 	if (ctx->mode != NOUVEAU_GRCTX_PROG)
@@ -115,7 +115,7 @@ cp_pos(struct nouveau_grctx *ctx, int offset)
 }
 
 static inline void
-gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
+gr_def(struct nouveau_grctx *ctx, u32 reg, u32 val)
 {
 	if (ctx->mode != NOUVEAU_GRCTX_VALS)
 		return;
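These cp_* helpers are the whole code-generation surface for the pre-Fermi ctxprogs: in PROG mode every call appends one 32-bit opcode to the buffer (the real cp_out() BUG_ON()s when the buffer fills). A rough userspace illustration of the mechanics follows; the CP_LOAD_SR opcode value here is invented for the example, and the struct is trimmed to the three fields the helpers touch.

#include <stdint.h>
#include <stdio.h>

struct grctx {
	uint32_t *data;        /* opcode buffer */
	uint32_t ctxprog_len;  /* words emitted so far */
	uint32_t ctxprog_max;  /* buffer capacity */
};

static void cp_out(struct grctx *ctx, uint32_t inst)
{
	if (ctx->ctxprog_len < ctx->ctxprog_max) /* the real helper BUG_ON()s */
		ctx->data[ctx->ctxprog_len++] = inst;
}

#define CP_LOAD_SR 0x00200000 /* hypothetical opcode base for this sketch */

static void cp_lsr(struct grctx *ctx, uint32_t val)
{
	cp_out(ctx, CP_LOAD_SR | val); /* opcode | immediate operand */
}

int main(void)
{
	uint32_t buf[256];
	struct grctx ctx = { .data = buf, .ctxprog_max = 256 };

	cp_lsr(&ctx, 0x1234);
	printf("ctxprog has %u word(s): 0x%08x\n",
	       (unsigned)ctx.ctxprog_len, (unsigned)buf[0]);
	return 0;
}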
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
index cf115ad4dad1..e45035efb8ca 100644
--- a/drivers/gpu/drm/nouveau/nv40_grctx.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
@@ -22,6 +22,8 @@
  * Authors: Ben Skeggs
  */
 
+#include <core/gpuobj.h>
+
 /* NVIDIA context programs handle a number of other conditions which are
  * not implemented in our versions. It's not clear why NVIDIA context
  * programs have this code, nor whether it's strictly necessary for
@@ -109,20 +111,18 @@
 #define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */
 #define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_grctx.h"
+#include "nv40.h"
+#include "ctx.h"
 
 /* TODO:
  * - get vs count from 0x1540
 */
 
 static int
-nv40_graph_vs_count(struct drm_device *dev)
+nv40_graph_vs_count(struct nouveau_device *device)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x47:
 	case 0x49:
 	case 0x4b:
@@ -160,7 +160,7 @@ enum cp_label {
 static void
 nv40_graph_construct_general(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i;
 
 	cp_ctx(ctx, 0x4000a4, 1);
@@ -187,7 +187,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
 	cp_ctx(ctx, 0x400724, 1);
 	gr_def(ctx, 0x400724, 0x02008821);
 	cp_ctx(ctx, 0x400770, 3);
-	if (dev_priv->chipset == 0x40) {
+	if (device->chipset == 0x40) {
 		cp_ctx(ctx, 0x400814, 4);
 		cp_ctx(ctx, 0x400828, 5);
 		cp_ctx(ctx, 0x400840, 5);
@@ -208,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
 		gr_def(ctx, 0x4009dc, 0x80000000);
 	} else {
 		cp_ctx(ctx, 0x400840, 20);
-		if (nv44_graph_class(ctx->dev)) {
+		if (nv44_graph_class(ctx->device)) {
 			for (i = 0; i < 8; i++)
 				gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
 		}
@@ -217,21 +217,21 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
 		gr_def(ctx, 0x400888, 0x00000040);
 		cp_ctx(ctx, 0x400894, 11);
 		gr_def(ctx, 0x400894, 0x00000040);
-		if (!nv44_graph_class(ctx->dev)) {
+		if (!nv44_graph_class(ctx->device)) {
 			for (i = 0; i < 8; i++)
 				gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
 		}
 		cp_ctx(ctx, 0x4008e0, 2);
 		cp_ctx(ctx, 0x4008f8, 2);
-		if (dev_priv->chipset == 0x4c ||
-		    (dev_priv->chipset & 0xf0) == 0x60)
+		if (device->chipset == 0x4c ||
+		    (device->chipset & 0xf0) == 0x60)
 			cp_ctx(ctx, 0x4009f8, 1);
 	}
 	cp_ctx(ctx, 0x400a00, 73);
 	gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
 	cp_ctx(ctx, 0x401000, 4);
 	cp_ctx(ctx, 0x405004, 1);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x47:
 	case 0x49:
 	case 0x4b:
@@ -240,7 +240,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
 		break;
 	default:
 		cp_ctx(ctx, 0x403440, 1);
-		switch (dev_priv->chipset) {
+		switch (device->chipset) {
 		case 0x40:
 			gr_def(ctx, 0x403440, 0x00000010);
 			break;
@@ -266,19 +266,19 @@
 static void
 nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i;
 
-	if (dev_priv->chipset == 0x40) {
+	if (device->chipset == 0x40) {
 		cp_ctx(ctx, 0x401880, 51);
 		gr_def(ctx, 0x401940, 0x00000100);
 	} else
-	if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
-	    dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
+	if (device->chipset == 0x46 || device->chipset == 0x47 ||
+	    device->chipset == 0x49 || device->chipset == 0x4b) {
 		cp_ctx(ctx, 0x401880, 32);
 		for (i = 0; i < 16; i++)
 			gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
-		if (dev_priv->chipset == 0x46)
+		if (device->chipset == 0x46)
 			cp_ctx(ctx, 0x401900, 16);
 		cp_ctx(ctx, 0x401940, 3);
 	}
@@ -289,7 +289,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
 	gr_def(ctx, 0x401978, 0xffff0000);
 	gr_def(ctx, 0x40197c, 0x00000001);
 	gr_def(ctx, 0x401990, 0x46400000);
-	if (dev_priv->chipset == 0x40) {
+	if (device->chipset == 0x40) {
 		cp_ctx(ctx, 0x4019a0, 2);
 		cp_ctx(ctx, 0x4019ac, 5);
 	} else {
@@ -297,7 +297,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
 		cp_ctx(ctx, 0x4019b4, 3);
 	}
 	gr_def(ctx, 0x4019bc, 0xffff0000);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x46:
 	case 0x47:
 	case 0x49:
@@ -316,7 +316,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
 	for (i = 0; i < 16; i++)
 		gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
 	gr_def(ctx, 0x401a8c, 0x4b7fffff);
-	if (dev_priv->chipset == 0x40) {
+	if (device->chipset == 0x40) {
 		cp_ctx(ctx, 0x401ab8, 3);
 	} else {
 		cp_ctx(ctx, 0x401ab8, 1);
@@ -327,10 +327,10 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
 	gr_def(ctx, 0x401ad4, 0x70605040);
 	gr_def(ctx, 0x401ad8, 0xb8a89888);
 	gr_def(ctx, 0x401adc, 0xf8e8d8c8);
-	cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
+	cp_ctx(ctx, 0x401b10, device->chipset == 0x40 ? 2 : 1);
 	gr_def(ctx, 0x401b10, 0x40100000);
-	cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
-	gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
+	cp_ctx(ctx, 0x401b18, device->chipset == 0x40 ? 6 : 5);
+	gr_def(ctx, 0x401b28, device->chipset == 0x40 ?
 	       0x00000004 : 0x00000000);
 	cp_ctx(ctx, 0x401b30, 25);
 	gr_def(ctx, 0x401b34, 0x0000ffff);
@@ -341,8 +341,8 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
 	gr_def(ctx, 0x401b84, 0xffffffff);
 	gr_def(ctx, 0x401b88, 0x00ff7000);
 	gr_def(ctx, 0x401b8c, 0x0000ffff);
-	if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
-	    dev_priv->chipset != 0x4e)
+	if (device->chipset != 0x44 && device->chipset != 0x4a &&
+	    device->chipset != 0x4e)
 		cp_ctx(ctx, 0x401b94, 1);
 	cp_ctx(ctx, 0x401b98, 8);
 	gr_def(ctx, 0x401b9c, 0x00ff0000);
@@ -371,12 +371,12 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
 static void
 nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i;
 
 	cp_ctx(ctx, 0x402000, 1);
-	cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
-	switch (dev_priv->chipset) {
+	cp_ctx(ctx, 0x402404, device->chipset == 0x40 ? 1 : 2);
+	switch (device->chipset) {
 	case 0x40:
 		gr_def(ctx, 0x402404, 0x00000001);
 		break;
@@ -393,9 +393,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 	default:
 		gr_def(ctx, 0x402404, 0x00000021);
 	}
-	if (dev_priv->chipset != 0x40)
+	if (device->chipset != 0x40)
 		gr_def(ctx, 0x402408, 0x030c30c3);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x44:
 	case 0x46:
 	case 0x4a:
@@ -408,10 +408,10 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 	default:
 		break;
 	}
-	cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
+	cp_ctx(ctx, 0x402480, device->chipset == 0x40 ? 8 : 9);
 	gr_def(ctx, 0x402488, 0x3e020200);
 	gr_def(ctx, 0x40248c, 0x00ffffff);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x40:
 		gr_def(ctx, 0x402490, 0x60103f00);
 		break;
@@ -428,16 +428,16 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 		gr_def(ctx, 0x402490, 0x0c103f00);
 		break;
 	}
-	gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
+	gr_def(ctx, 0x40249c, device->chipset <= 0x43 ?
 	       0x00020000 : 0x00040000);
 	cp_ctx(ctx, 0x402500, 31);
 	gr_def(ctx, 0x402530, 0x00008100);
-	if (dev_priv->chipset == 0x40)
+	if (device->chipset == 0x40)
 		cp_ctx(ctx, 0x40257c, 6);
 	cp_ctx(ctx, 0x402594, 16);
 	cp_ctx(ctx, 0x402800, 17);
 	gr_def(ctx, 0x402800, 0x00000001);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x47:
 	case 0x49:
 	case 0x4b:
@@ -445,7 +445,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 		gr_def(ctx, 0x402864, 0x00001001);
 		cp_ctx(ctx, 0x402870, 3);
 		gr_def(ctx, 0x402878, 0x00000003);
-		if (dev_priv->chipset != 0x47) { /* belong at end!! */
+		if (device->chipset != 0x47) { /* belong at end!! */
 			cp_ctx(ctx, 0x402900, 1);
 			cp_ctx(ctx, 0x402940, 1);
 			cp_ctx(ctx, 0x402980, 1);
@@ -470,9 +470,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 	}
 
 	cp_ctx(ctx, 0x402c00, 4);
-	gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
+	gr_def(ctx, 0x402c00, device->chipset == 0x40 ?
 	       0x80800001 : 0x00888001);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x47:
 	case 0x49:
 	case 0x4b:
@@ -485,30 +485,30 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 		break;
 	default:
 		cp_ctx(ctx, 0x402c10, 4);
-		if (dev_priv->chipset == 0x40)
+		if (device->chipset == 0x40)
 			cp_ctx(ctx, 0x402c20, 36);
 		else
-		if (dev_priv->chipset <= 0x42)
+		if (device->chipset <= 0x42)
 			cp_ctx(ctx, 0x402c20, 24);
 		else
-		if (dev_priv->chipset <= 0x4a)
+		if (device->chipset <= 0x4a)
 			cp_ctx(ctx, 0x402c20, 16);
 		else
 			cp_ctx(ctx, 0x402c20, 8);
-		cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
+		cp_ctx(ctx, 0x402cb0, device->chipset == 0x40 ? 12 : 13);
 		gr_def(ctx, 0x402cd4, 0x00000005);
-		if (dev_priv->chipset != 0x40)
+		if (device->chipset != 0x40)
 			gr_def(ctx, 0x402ce0, 0x0000ffff);
 		break;
 	}
 
-	cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
-	cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
-	cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
-	for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
+	cp_ctx(ctx, 0x403400, device->chipset == 0x40 ? 4 : 3);
+	cp_ctx(ctx, 0x403410, device->chipset == 0x40 ? 4 : 3);
+	cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->device));
+	for (i = 0; i < nv40_graph_vs_count(ctx->device); i++)
		gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
 
-	if (dev_priv->chipset != 0x40) {
+	if (device->chipset != 0x40) {
 		cp_ctx(ctx, 0x403600, 1);
 		gr_def(ctx, 0x403600, 0x00000001);
 	}
@@ -516,7 +516,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 
 	cp_ctx(ctx, 0x403c18, 1);
 	gr_def(ctx, 0x403c18, 0x00000001);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x46:
 	case 0x47:
 	case 0x49:
@@ -527,7 +527,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 		gr_def(ctx, 0x405c24, 0x000e3000);
 		break;
 	}
-	if (dev_priv->chipset != 0x4e)
+	if (device->chipset != 0x4e)
 		cp_ctx(ctx, 0x405800, 11);
 	cp_ctx(ctx, 0x407000, 1);
 }
@@ -535,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 static void
 nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
 {
-	int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
+	int len = nv44_graph_class(ctx->device) ? 0x0084 : 0x0684;
 
 	cp_out (ctx, 0x300000);
 	cp_lsr (ctx, len - 4);
@@ -550,32 +550,31 @@ nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
 static void
 nv40_graph_construct_shader(struct nouveau_grctx *ctx)
 {
-	struct drm_device *dev = ctx->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	struct nouveau_gpuobj *obj = ctx->data;
 	int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
 	int offset, i;
 
-	vs_nr = nv40_graph_vs_count(ctx->dev);
+	vs_nr = nv40_graph_vs_count(ctx->device);
 	vs_nr_b0 = 363;
-	vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
-	if (dev_priv->chipset == 0x40) {
+	vs_nr_b1 = device->chipset == 0x40 ? 128 : 64;
+	if (device->chipset == 0x40) {
 		b0_offset = 0x2200/4; /* 33a0 */
 		b1_offset = 0x55a0/4; /* 1500 */
 		vs_len = 0x6aa0/4;
 	} else
-	if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
+	if (device->chipset == 0x41 || device->chipset == 0x42) {
 		b0_offset = 0x2200/4; /* 2200 */
 		b1_offset = 0x4400/4; /* 0b00 */
 		vs_len = 0x4f00/4;
 	} else {
 		b0_offset = 0x1d40/4; /* 2200 */
 		b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
-		vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
+		vs_len = nv44_graph_class(device) ? 0x4980/4 : 0x4a40/4;
 	}
 
 	cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
-	cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
+	cp_out(ctx, nv44_graph_class(device) ? 0x800029 : 0x800041);
 
 	offset = ctx->ctxvals_pos;
 	ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
@@ -661,21 +660,21 @@ nv40_grctx_generate(struct nouveau_grctx *ctx)
 }
 
 void
-nv40_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
 {
 	nv40_grctx_generate(&(struct nouveau_grctx) {
-		.dev = dev,
+		.device = device,
 		.mode = NOUVEAU_GRCTX_VALS,
 		.data = mem,
 	});
 }
 
 void
-nv40_grctx_init(struct drm_device *dev, u32 *size)
+nv40_grctx_init(struct nouveau_device *device, u32 *size)
 {
 	u32 ctxprog[256], i;
 	struct nouveau_grctx ctx = {
-		.dev = dev,
+		.device = device,
 		.mode = NOUVEAU_GRCTX_PROG,
 		.data = ctxprog,
 		.ctxprog_max = ARRAY_SIZE(ctxprog)
@@ -683,8 +682,8 @@ nv40_grctx_init(struct drm_device *dev, u32 *size)
 
 	nv40_grctx_generate(&ctx);
 
-	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+	nv_wr32(device, 0x400324, 0);
 	for (i = 0; i < ctx.ctxprog_len; i++)
-		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, ctxprog[i]);
+		nv_wr32(device, 0x400328, ctxprog[i]);
 	*size = ctx.ctxvals_pos * 4;
 }
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
index 3bb96a029d66..552fdbd45ebe 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
@@ -20,6 +20,8 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <core/gpuobj.h>
+
 #define CP_FLAG_CLEAR 0
 #define CP_FLAG_SET 1
 #define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
@@ -105,9 +107,8 @@
 #define CP_SEEK_1 0x00c000ff
 #define CP_SEEK_2 0x00c800ff
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_grctx.h"
+#include "nv50.h"
+#include "ctx.h"
 
 #define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
 #define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
@@ -175,32 +176,6 @@ static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
 static int
 nv50_grctx_generate(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-
-	switch (dev_priv->chipset) {
-	case 0x50:
-	case 0x84:
-	case 0x86:
-	case 0x92:
-	case 0x94:
-	case 0x96:
-	case 0x98:
-	case 0xa0:
-	case 0xa3:
-	case 0xa5:
-	case 0xa8:
-	case 0xaa:
-	case 0xac:
-	case 0xaf:
-		break;
-	default:
-		NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
-				   "your NV%x card.\n", dev_priv->chipset);
-		NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
-				   "the devs.\n");
-		return -ENOSYS;
-	}
-
 	cp_set (ctx, STATE, RUNNING);
 	cp_set (ctx, XFER_SWITCH, ENABLE);
 	/* decide whether we're loading/unloading the context */
@@ -278,30 +253,36 @@ nv50_grctx_generate(struct nouveau_grctx *ctx)
 }
 
 void
-nv50_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+nv50_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
 {
 	nv50_grctx_generate(&(struct nouveau_grctx) {
-			    .dev = dev,
+			    .device = device,
 			    .mode = NOUVEAU_GRCTX_VALS,
 			    .data = mem,
 			    });
 }
 
 int
-nv50_grctx_init(struct drm_device *dev, u32 *data, u32 max, u32 *len, u32 *cnt)
+nv50_grctx_init(struct nouveau_device *device, u32 *size)
 {
+	u32 *ctxprog = kmalloc(512 * 4, GFP_KERNEL), i;
 	struct nouveau_grctx ctx = {
-		.dev = dev,
+		.device = device,
 		.mode = NOUVEAU_GRCTX_PROG,
-		.data = data,
-		.ctxprog_max = max
+		.data = ctxprog,
+		.ctxprog_max = 512,
 	};
-	int ret;
 
-	ret = nv50_grctx_generate(&ctx);
-	*cnt = ctx.ctxvals_pos * 4;
-	*len = ctx.ctxprog_len;
-	return ret;
+	if (!ctxprog)
+		return -ENOMEM;
+	nv50_grctx_generate(&ctx);
+
+	nv_wr32(device, 0x400324, 0);
+	for (i = 0; i < ctx.ctxprog_len; i++)
+		nv_wr32(device, 0x400328, ctxprog[i]);
+	*size = ctx.ctxvals_pos * 4;
+	kfree(ctxprog);
+	return 0;
 }
 
 /*
@@ -315,36 +296,36 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx);
 static void
 nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i, j;
 	int offset, base;
-	uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+	u32 units = nv_rd32 (ctx->device, 0x1540);
 
 	/* 0800: DISPATCH */
 	cp_ctx(ctx, 0x400808, 7);
 	gr_def(ctx, 0x400814, 0x00000030);
 	cp_ctx(ctx, 0x400834, 0x32);
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		gr_def(ctx, 0x400834, 0xff400040);
 		gr_def(ctx, 0x400838, 0xfff00080);
 		gr_def(ctx, 0x40083c, 0xfff70090);
 		gr_def(ctx, 0x400840, 0xffe806a8);
 	}
 	gr_def(ctx, 0x400844, 0x00000002);
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		gr_def(ctx, 0x400894, 0x00001000);
 	gr_def(ctx, 0x4008e8, 0x00000003);
 	gr_def(ctx, 0x4008ec, 0x00001000);
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		cp_ctx(ctx, 0x400908, 0xb);
-	else if (dev_priv->chipset < 0xa0)
+	else if (device->chipset < 0xa0)
 		cp_ctx(ctx, 0x400908, 0xc);
 	else
 		cp_ctx(ctx, 0x400908, 0xe);
 
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		cp_ctx(ctx, 0x400b00, 0x1);
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		cp_ctx(ctx, 0x400b10, 0x1);
 		gr_def(ctx, 0x400b10, 0x0001629d);
 		cp_ctx(ctx, 0x400b20, 0x1);
@@ -358,10 +339,10 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	gr_def(ctx, 0x400c08, 0x0000fe0c);
 
 	/* 1000 */
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		cp_ctx(ctx, 0x401008, 0x4);
 		gr_def(ctx, 0x401014, 0x00001000);
-	} else if (!IS_NVA3F(dev_priv->chipset)) {
+	} else if (!IS_NVA3F(device->chipset)) {
 		cp_ctx(ctx, 0x401008, 0x5);
 		gr_def(ctx, 0x401018, 0x00001000);
 	} else {
@@ -372,7 +353,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	/* 1400 */
 	cp_ctx(ctx, 0x401400, 0x8);
 	cp_ctx(ctx, 0x401424, 0x3);
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		gr_def(ctx, 0x40142c, 0x0001fd87);
 	else
 		gr_def(ctx, 0x40142c, 0x00000187);
@@ -382,10 +363,10 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	/* 1800: STREAMOUT */
 	cp_ctx(ctx, 0x401814, 0x1);
 	gr_def(ctx, 0x401814, 0x000000ff);
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		cp_ctx(ctx, 0x40181c, 0xe);
 		gr_def(ctx, 0x401850, 0x00000004);
-	} else if (dev_priv->chipset < 0xa0) {
+	} else if (device->chipset < 0xa0) {
 		cp_ctx(ctx, 0x40181c, 0xf);
 		gr_def(ctx, 0x401854, 0x00000004);
 	} else {
@@ -395,7 +376,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 
 	/* 1C00 */
 	cp_ctx(ctx, 0x401c00, 0x1);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 		gr_def(ctx, 0x401c00, 0x0001005f);
 		break;
@@ -424,7 +405,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 
 	/* 2400 */
 	cp_ctx(ctx, 0x402400, 0x1);
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		cp_ctx(ctx, 0x402408, 0x1);
 	else
 		cp_ctx(ctx, 0x402408, 0x2);
@@ -432,21 +413,21 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 
 	/* 2800: CSCHED */
 	cp_ctx(ctx, 0x402800, 0x1);
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		gr_def(ctx, 0x402800, 0x00000006);
 
 	/* 2C00: ZCULL */
 	cp_ctx(ctx, 0x402c08, 0x6);
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		gr_def(ctx, 0x402c14, 0x01000000);
 	gr_def(ctx, 0x402c18, 0x000000ff);
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		cp_ctx(ctx, 0x402ca0, 0x1);
 	else
 		cp_ctx(ctx, 0x402ca0, 0x2);
-	if (dev_priv->chipset < 0xa0)
+	if (device->chipset < 0xa0)
 		gr_def(ctx, 0x402ca0, 0x00000400);
-	else if (!IS_NVA3F(dev_priv->chipset))
+	else if (!IS_NVA3F(device->chipset))
 		gr_def(ctx, 0x402ca0, 0x00000800);
 	else
 		gr_def(ctx, 0x402ca0, 0x00000400);
@@ -457,14 +438,14 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	gr_def(ctx, 0x403004, 0x00000001);
 
 	/* 3400 */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		cp_ctx(ctx, 0x403404, 0x1);
 		gr_def(ctx, 0x403404, 0x00000001);
 	}
 
 	/* 5000: CCACHE */
 	cp_ctx(ctx, 0x405000, 0x1);
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 		gr_def(ctx, 0x405000, 0x00300080);
 		break;
@@ -493,22 +474,22 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	cp_ctx(ctx, 0x40502c, 0x1);
 
 	/* 6000? */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		cp_ctx(ctx, 0x4063e0, 0x1);
 
 	/* 6800: M2MF */
-	if (dev_priv->chipset < 0x90) {
+	if (device->chipset < 0x90) {
 		cp_ctx(ctx, 0x406814, 0x2b);
 		gr_def(ctx, 0x406818, 0x00000f80);
 		gr_def(ctx, 0x406860, 0x007f0080);
 		gr_def(ctx, 0x40689c, 0x007f0080);
 	} else {
 		cp_ctx(ctx, 0x406814, 0x4);
-		if (dev_priv->chipset == 0x98)
+		if (device->chipset == 0x98)
 			gr_def(ctx, 0x406818, 0x00000f80);
 		else
 			gr_def(ctx, 0x406818, 0x00001f80);
-		if (IS_NVA3F(dev_priv->chipset))
+		if (IS_NVA3F(device->chipset))
 			gr_def(ctx, 0x40681c, 0x00000030);
 		cp_ctx(ctx, 0x406830, 0x3);
 	}
@@ -517,43 +498,43 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	for (i = 0; i < 8; i++) {
 		if (units & (1<<(i+16))) {
 			cp_ctx(ctx, 0x407000 + (i<<8), 3);
-			if (dev_priv->chipset == 0x50)
+			if (device->chipset == 0x50)
 				gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
-			else if (dev_priv->chipset != 0xa5)
+			else if (device->chipset != 0xa5)
 				gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
 			else
 				gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
 			gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
 
-			if (dev_priv->chipset == 0x50) {
+			if (device->chipset == 0x50) {
 				cp_ctx(ctx, 0x407010 + (i<<8), 1);
-			} else if (dev_priv->chipset < 0xa0) {
+			} else if (device->chipset < 0xa0) {
 				cp_ctx(ctx, 0x407010 + (i<<8), 2);
 				gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
 				gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
 			} else {
 				cp_ctx(ctx, 0x407010 + (i<<8), 3);
 				gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
-				if (dev_priv->chipset != 0xa5)
+				if (device->chipset != 0xa5)
 					gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
 				else
 					gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
 			}
 
 			cp_ctx(ctx, 0x407080 + (i<<8), 4);
-			if (dev_priv->chipset != 0xa5)
+			if (device->chipset != 0xa5)
 				gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
 			else
 				gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
-			if (dev_priv->chipset == 0x50)
+			if (device->chipset == 0x50)
 				gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
 			else
 				gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
 			gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
 
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				cp_ctx(ctx, 0x407094 + (i<<8), 1);
-			else if (!IS_NVA3F(dev_priv->chipset))
+			else if (!IS_NVA3F(device->chipset))
 				cp_ctx(ctx, 0x407094 + (i<<8), 3);
 			else {
 				cp_ctx(ctx, 0x407094 + (i<<8), 4);
@@ -563,30 +544,30 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	}
 
 	cp_ctx(ctx, 0x407c00, 0x3);
-	if (dev_priv->chipset < 0x90)
+	if (device->chipset < 0x90)
 		gr_def(ctx, 0x407c00, 0x00010040);
-	else if (dev_priv->chipset < 0xa0)
+	else if (device->chipset < 0xa0)
 		gr_def(ctx, 0x407c00, 0x00390040);
 	else
 		gr_def(ctx, 0x407c00, 0x003d0040);
 	gr_def(ctx, 0x407c08, 0x00000022);
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		cp_ctx(ctx, 0x407c10, 0x3);
 		cp_ctx(ctx, 0x407c20, 0x1);
 		cp_ctx(ctx, 0x407c2c, 0x1);
 	}
 
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		cp_ctx(ctx, 0x407d00, 0x9);
 	} else {
 		cp_ctx(ctx, 0x407d00, 0x15);
 	}
-	if (dev_priv->chipset == 0x98)
+	if (device->chipset == 0x98)
 		gr_def(ctx, 0x407d08, 0x00380040);
 	else {
-		if (dev_priv->chipset < 0x90)
+		if (device->chipset < 0x90)
 			gr_def(ctx, 0x407d08, 0x00010040);
-		else if (dev_priv->chipset < 0xa0)
+		else if (device->chipset < 0xa0)
 			gr_def(ctx, 0x407d08, 0x00390040);
 		else
 			gr_def(ctx, 0x407d08, 0x003d0040);
@@ -596,11 +577,11 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 	/* 8000+: per-TP state */
 	for (i = 0; i < 10; i++) {
 		if (units & (1<<i)) {
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				base = 0x408000 + (i<<12);
 			else
 				base = 0x408000 + (i<<11);
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				offset = base + 0xc00;
 			else
 				offset = base + 0x80;
@@ -609,9 +590,9 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 			cp_ctx(ctx, offset + 0x08, 1);
 
 			/* per-MP state */
-			for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) {
+			for (j = 0; j < (device->chipset < 0xa0 ? 2 : 4); j++) {
 				if (!(units & (1 << (j+24)))) continue;
-				if (dev_priv->chipset < 0xa0)
+				if (device->chipset < 0xa0)
 					offset = base + 0x200 + (j<<7);
 				else
 					offset = base + 0x100 + (j<<7);
@@ -620,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 				gr_def(ctx, offset + 0x04, 0x00160000);
 				gr_def(ctx, offset + 0x08, 0x01800000);
 				gr_def(ctx, offset + 0x18, 0x0003ffff);
-				switch (dev_priv->chipset) {
+				switch (device->chipset) {
 				case 0x50:
 					gr_def(ctx, offset + 0x1c, 0x00080000);
 					break;
@@ -651,53 +632,53 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 					break;
 				}
 				gr_def(ctx, offset + 0x40, 0x00010401);
-				if (dev_priv->chipset == 0x50)
+				if (device->chipset == 0x50)
 					gr_def(ctx, offset + 0x48, 0x00000040);
 				else
 					gr_def(ctx, offset + 0x48, 0x00000078);
 				gr_def(ctx, offset + 0x50, 0x000000bf);
 				gr_def(ctx, offset + 0x58, 0x00001210);
-				if (dev_priv->chipset == 0x50)
+				if (device->chipset == 0x50)
 					gr_def(ctx, offset + 0x5c, 0x00000080);
 				else
 					gr_def(ctx, offset + 0x5c, 0x08000080);
-				if (dev_priv->chipset >= 0xa0)
+				if (device->chipset >= 0xa0)
 					gr_def(ctx, offset + 0x68, 0x0000003e);
 			}
 
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				cp_ctx(ctx, base + 0x300, 0x4);
 			else
 				cp_ctx(ctx, base + 0x300, 0x5);
-			if (dev_priv->chipset == 0x50)
+			if (device->chipset == 0x50)
 				gr_def(ctx, base + 0x304, 0x00007070);
-			else if (dev_priv->chipset < 0xa0)
+			else if (device->chipset < 0xa0)
 				gr_def(ctx, base + 0x304, 0x00027070);
-			else if (!IS_NVA3F(dev_priv->chipset))
+			else if (!IS_NVA3F(device->chipset))
 				gr_def(ctx, base + 0x304, 0x01127070);
 			else
 				gr_def(ctx, base + 0x304, 0x05127070);
 
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				cp_ctx(ctx, base + 0x318, 1);
 			else
 				cp_ctx(ctx, base + 0x320, 1);
-			if (dev_priv->chipset == 0x50)
+			if (device->chipset == 0x50)
 				gr_def(ctx, base + 0x318, 0x0003ffff);
-			else if (dev_priv->chipset < 0xa0)
+			else if (device->chipset < 0xa0)
 				gr_def(ctx, base + 0x318, 0x03ffffff);
 			else
 				gr_def(ctx, base + 0x320, 0x07ffffff);
 
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				cp_ctx(ctx, base + 0x324, 5);
 			else
 				cp_ctx(ctx, base + 0x328, 4);
 
-			if (dev_priv->chipset < 0xa0) {
+			if (device->chipset < 0xa0) {
 				cp_ctx(ctx, base + 0x340, 9);
 				offset = base + 0x340;
-			} else if (!IS_NVA3F(dev_priv->chipset)) {
+			} else if (!IS_NVA3F(device->chipset)) {
 				cp_ctx(ctx, base + 0x33c, 0xb);
 				offset = base + 0x344;
 			} else {
@@ -706,12 +687,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 			}
 			gr_def(ctx, offset + 0x0, 0x00120407);
 			gr_def(ctx, offset + 0x4, 0x05091507);
-			if (dev_priv->chipset == 0x84)
+			if (device->chipset == 0x84)
 				gr_def(ctx, offset + 0x8, 0x05100202);
 			else
 				gr_def(ctx, offset + 0x8, 0x05010202);
 			gr_def(ctx, offset + 0xc, 0x00030201);
-			if (dev_priv->chipset == 0xa3)
+			if (device->chipset == 0xa3)
 				cp_ctx(ctx, base + 0x36c, 1);
 
 			cp_ctx(ctx, base + 0x400, 2);
@@ -720,7 +701,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 			gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
 			gr_def(ctx, base + 0x410, 0x00141210);
 
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				offset = base + 0x800;
 			else
 				offset = base + 0x500;
@@ -728,55 +709,55 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 			gr_def(ctx, offset + 0x0, 0x000001f0);
 			gr_def(ctx, offset + 0x4, 0x00000001);
 			gr_def(ctx, offset + 0x8, 0x00000003);
-			if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset))
+			if (device->chipset == 0x50 || IS_NVAAF(device->chipset))
 				gr_def(ctx, offset + 0xc, 0x00008000);
 			gr_def(ctx, offset + 0x14, 0x00039e00);
 			cp_ctx(ctx, offset + 0x1c, 2);
-			if (dev_priv->chipset == 0x50)
+			if (device->chipset == 0x50)
 				gr_def(ctx, offset + 0x1c, 0x00000040);
 			else
 				gr_def(ctx, offset + 0x1c, 0x00000100);
 			gr_def(ctx, offset + 0x20, 0x00003800);
 
-			if (dev_priv->chipset >= 0xa0) {
+			if (device->chipset >= 0xa0) {
 				cp_ctx(ctx, base + 0x54c, 2);
-				if (!IS_NVA3F(dev_priv->chipset))
+				if (!IS_NVA3F(device->chipset))
 					gr_def(ctx, base + 0x54c, 0x003fe006);
 				else
 					gr_def(ctx, base + 0x54c, 0x003fe007);
 				gr_def(ctx, base + 0x550, 0x003fe000);
 			}
 
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				offset = base + 0xa00;
 			else
 				offset = base + 0x680;
 			cp_ctx(ctx, offset, 1);
 			gr_def(ctx, offset, 0x00404040);
 
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				offset = base + 0xe00;
 			else
 				offset = base + 0x700;
 			cp_ctx(ctx, offset, 2);
-			if (dev_priv->chipset < 0xa0)
+			if (device->chipset < 0xa0)
 				gr_def(ctx, offset, 0x0077f005);
-			else if (dev_priv->chipset == 0xa5)
+			else if (device->chipset == 0xa5)
 				gr_def(ctx, offset, 0x6cf7f007);
-			else if (dev_priv->chipset == 0xa8)
+			else if (device->chipset == 0xa8)
 				gr_def(ctx, offset, 0x6cfff007);
-			else if (dev_priv->chipset == 0xac)
+			else if (device->chipset == 0xac)
 				gr_def(ctx, offset, 0x0cfff007);
 			else
 				gr_def(ctx, offset, 0x0cf7f007);
-			if (dev_priv->chipset == 0x50)
+			if (device->chipset == 0x50)
 				gr_def(ctx, offset + 0x4, 0x00007fff);
-			else if (dev_priv->chipset < 0xa0)
+			else if (device->chipset < 0xa0)
 				gr_def(ctx, offset + 0x4, 0x003f7fff);
 			else
 				gr_def(ctx, offset + 0x4, 0x02bf7fff);
 			cp_ctx(ctx, offset + 0x2c, 1);
-			if (dev_priv->chipset == 0x50) {
+			if (device->chipset == 0x50) {
 				cp_ctx(ctx, offset + 0x50, 9);
 				gr_def(ctx, offset + 0x54, 0x000003ff);
 				gr_def(ctx, offset + 0x58, 0x00000003);
@@ -785,7 +766,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 				gr_def(ctx, offset + 0x64, 0x0000001f);
 				gr_def(ctx, offset + 0x68, 0x0000000f);
 				gr_def(ctx, offset + 0x6c, 0x0000000f);
-			} else if (dev_priv->chipset < 0xa0) {
+			} else if (device->chipset < 0xa0) {
 				cp_ctx(ctx, offset + 0x50, 1);
 				cp_ctx(ctx, offset + 0x70, 1);
 			} else {
@@ -797,7 +778,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 }
 
 static void
-dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
+dd_emit(struct nouveau_grctx *ctx, int num, u32 val) {
 	int i;
 	if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
 		for (i = 0; i < num; i++)
@@ -808,7 +789,7 @@ dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
 static void
 nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int base, num;
 	base = ctx->ctxvals_pos;
 
@@ -822,7 +803,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 1);		/* 00000001 SRC_LINEAR #1 */
 	dd_emit(ctx, 1, 0);		/* 000000ff SRC_ADDRESS_HIGH */
 	dd_emit(ctx, 1, 0);		/* 00000001 SRC_SRGB */
-	if (dev_priv->chipset >= 0x94)
+	if (device->chipset >= 0x94)
 		dd_emit(ctx, 1, 0);	/* 00000003 eng2d UNK0258 */
 	dd_emit(ctx, 1, 1);		/* 00000fff SRC_DEPTH */
 	dd_emit(ctx, 1, 0x100);		/* 0000ffff SRC_HEIGHT */
@@ -851,7 +832,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 1);		/* 0000007f BLOCKDIM_Z */
 	dd_emit(ctx, 1, 4);		/* 000000ff CP_REG_ALLOC_TEMP */
 	dd_emit(ctx, 1, 1);		/* 00000001 BLOCKDIM_DIRTY */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		dd_emit(ctx, 1, 0);	/* 00000003 UNK03E8 */
 	dd_emit(ctx, 1, 1);		/* 0000007f BLOCK_ALLOC_HALFWARPS */
 	dd_emit(ctx, 1, 1);		/* 00000007 LOCAL_WARPS_NO_CLAMP */
@@ -863,7 +844,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 1);		/* 000007ff BLOCK_ALLOC_THREADS */
 
 	/* compat 2d state */
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		dd_emit(ctx, 4, 0);		/* 0000ffff clip X, Y, W, H */
 
 		dd_emit(ctx, 1, 1);		/* ffffffff chroma COLOR_FORMAT */
@@ -923,7 +904,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 0x100);		/* ffffffff m2mf TILING_PITCH_IN */
 
 	/* more compat 2d state */
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		dd_emit(ctx, 1, 1);		/* ffffffff line COLOR_FORMAT */
 		dd_emit(ctx, 1, 0);		/* ffffffff line OPERATION */
 
@@ -957,18 +938,18 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 0);		/* 000000ff UNK12B0_2 */
 	dd_emit(ctx, 1, 0);		/* 0000000f FP_TEXTURES_LOG2 */
 	dd_emit(ctx, 1, 0);		/* 0000000f FP_SAMPLERS_LOG2 */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		dd_emit(ctx, 1, 0);	/* ffffffff */
 		dd_emit(ctx, 1, 0);	/* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
 	} else {
 		dd_emit(ctx, 1, 0);	/* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
 	}
 	dd_emit(ctx, 1, 0xc);		/* 000000ff SEMANTIC_COLOR.BFC0_ID */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 1, 0);	/* 00000001 SEMANTIC_COLOR.CLMP_EN */
 	dd_emit(ctx, 1, 8);		/* 000000ff SEMANTIC_COLOR.COLR_NR */
 	dd_emit(ctx, 1, 0x14);		/* 000000ff SEMANTIC_COLOR.FFC0_ID */
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		dd_emit(ctx, 1, 0);	/* 000000ff SEMANTIC_LAYER */
 		dd_emit(ctx, 1, 0);	/* 00000001 */
 	} else {
@@ -994,7 +975,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 8, 0);		/* ffffffff RT_ADDRESS_LOW */
 	dd_emit(ctx, 1, 0xcf);		/* 000000ff RT_FORMAT */
 	dd_emit(ctx, 7, 0);		/* 000000ff RT_FORMAT */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 3, 0);	/* 1, 1, 1 */
 	else
 		dd_emit(ctx, 2, 0);	/* 1, 1 */
@@ -1002,15 +983,15 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 0x80);		/* 0000ffff GP_VERTEX_OUTPUT_COUNT*/
 	dd_emit(ctx, 1, 4);		/* 000000ff GP_REG_ALLOC_RESULT */
 	dd_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		dd_emit(ctx, 1, 3);	/* 00000003 */
 		dd_emit(ctx, 1, 0);	/* 00000001 UNK1418. Alone. */
 	}
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 1, 3);	/* 00000003 UNK15AC */
 	dd_emit(ctx, 1, 1);		/* ffffffff RASTERIZE_ENABLE */
 	dd_emit(ctx, 1, 0);		/* 00000001 FP_CONTROL.EXPORTS_Z */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 1, 0);	/* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
 	dd_emit(ctx, 1, 0x12);		/* 000000ff FP_INTERPOLANT_CTRL.COUNT */
 	dd_emit(ctx, 1, 0x10);		/* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
@@ -1022,16 +1003,16 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 4);		/* 000000ff FP_RESULT_COUNT */
 	dd_emit(ctx, 1, 2);		/* ffffffff REG_MODE */
 	dd_emit(ctx, 1, 4);		/* 000000ff FP_REG_ALLOC_TEMP */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		dd_emit(ctx, 1, 0);	/* ffffffff */
 	dd_emit(ctx, 1, 0);		/* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
 	dd_emit(ctx, 1, 0);		/* ffffffff STRMOUT_ENABLE */
 	dd_emit(ctx, 1, 0x3fffff);	/* 003fffff TIC_LIMIT */
 	dd_emit(ctx, 1, 0x1fff);	/* 000fffff TSC_LIMIT */
 	dd_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE*/
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		dd_emit(ctx, 8, 0);	/* 00000001 */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		dd_emit(ctx, 1, 1);	/* 00000007 VTX_ATTR_DEFINE.COMP */
 		dd_emit(ctx, 1, 1);	/* 00000007 VTX_ATTR_DEFINE.SIZE */
 		dd_emit(ctx, 1, 2);	/* 00000007 VTX_ATTR_DEFINE.TYPE */
@@ -1042,20 +1023,20 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
 	dd_emit(ctx, 1, 0);		/* 0000000f VP_TEXTURES_LOG2 */
 	dd_emit(ctx, 1, 0);		/* 0000000f VP_SAMPLERS_LOG2 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		dd_emit(ctx, 1, 0);	/* 00000001 */
 	dd_emit(ctx, 1, 2);		/* 00000003 POLYGON_MODE_BACK */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		dd_emit(ctx, 1, 0);	/* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
 	dd_emit(ctx, 1, 0);		/* 0000ffff CB_ADDR_INDEX */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		dd_emit(ctx, 1, 0);	/* 00000003 */
 	dd_emit(ctx, 1, 0);		/* 00000001 CULL_FACE_ENABLE */
 	dd_emit(ctx, 1, 1);		/* 00000003 CULL_FACE */
 	dd_emit(ctx, 1, 0);		/* 00000001 FRONT_FACE */
 	dd_emit(ctx, 1, 2);		/* 00000003 POLYGON_MODE_FRONT */
 	dd_emit(ctx, 1, 0x1000);	/* 00007fff UNK141C */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		dd_emit(ctx, 1, 0xe00);		/* 7fff */
 		dd_emit(ctx, 1, 0x1000);	/* 7fff */
 		dd_emit(ctx, 1, 0x1e00);	/* 7fff */
@@ -1070,10 +1051,10 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 	dd_emit(ctx, 1, 0);		/* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
 	dd_emit(ctx, 1, 0);		/* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
 	dd_emit(ctx, 1, 0x200);		/* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		dd_emit(ctx, 1, 0x200);
 	dd_emit(ctx, 1, 0);		/* 00000001 */
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		dd_emit(ctx, 1, 1);	/* 00000001 */
 		dd_emit(ctx, 1, 0x70);	/* 000000ff */
 		dd_emit(ctx, 1, 0x80);	/* 000000ff */
@@ -1120,7 +1101,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
 
 	num = ctx->ctxvals_pos - base;
 	ctx->ctxvals_pos = base;
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		cp_ctx(ctx, 0x404800, num);
 	else
 		cp_ctx(ctx, 0x405400, num);
@@ -1169,7 +1150,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
  */
 
 static void
-xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
+xf_emit(struct nouveau_grctx *ctx, int num, u32 val) {
 	int i;
 	if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
 		for (i = 0; i < num; i++)
@@ -1201,16 +1182,16 @@ static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
1201static void 1182static void
1202nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) 1183nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
1203{ 1184{
1204 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1185 struct nouveau_device *device = ctx->device;
1205 int i; 1186 int i;
1206 int offset; 1187 int offset;
1207 int size = 0; 1188 int size = 0;
1208 uint32_t units = nv_rd32 (ctx->dev, 0x1540); 1189 u32 units = nv_rd32 (ctx->device, 0x1540);
1209 1190
1210 offset = (ctx->ctxvals_pos+0x3f)&~0x3f; 1191 offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
1211 ctx->ctxvals_base = offset; 1192 ctx->ctxvals_base = offset;
1212 1193
1213 if (dev_priv->chipset < 0xa0) { 1194 if (device->chipset < 0xa0) {
1214 /* Strand 0 */ 1195 /* Strand 0 */
1215 ctx->ctxvals_pos = offset; 1196 ctx->ctxvals_pos = offset;
1216 nv50_graph_construct_gene_dispatch(ctx); 1197 nv50_graph_construct_gene_dispatch(ctx);
@@ -1280,7 +1261,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
1280 1261
1281 /* Strand 2 */ 1262 /* Strand 2 */
1282 ctx->ctxvals_pos = offset + 2; 1263 ctx->ctxvals_pos = offset + 2;
1283 if (dev_priv->chipset == 0xa0) 1264 if (device->chipset == 0xa0)
1284 nv50_graph_construct_gene_unk14xx(ctx); 1265 nv50_graph_construct_gene_unk14xx(ctx);
1285 nv50_graph_construct_gene_unk24xx(ctx); 1266 nv50_graph_construct_gene_unk24xx(ctx);
1286 if ((ctx->ctxvals_pos-offset)/8 > size) 1267 if ((ctx->ctxvals_pos-offset)/8 > size)
@@ -1327,7 +1308,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
1327 1308
1328 /* Strand 7 */ 1309 /* Strand 7 */
1329 ctx->ctxvals_pos = offset + 7; 1310 ctx->ctxvals_pos = offset + 7;
1330 if (dev_priv->chipset == 0xa0) { 1311 if (device->chipset == 0xa0) {
1331 if (units & (1 << 4)) 1312 if (units & (1 << 4))
1332 nv50_graph_construct_xfer_tp(ctx); 1313 nv50_graph_construct_xfer_tp(ctx);
1333 if (units & (1 << 5)) 1314 if (units & (1 << 5))
@@ -1365,24 +1346,24 @@ static void
1365nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx) 1346nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
1366{ 1347{
1367 /* start of strand 0 */ 1348 /* start of strand 0 */
1368 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1349 struct nouveau_device *device = ctx->device;
1369 /* SEEK */ 1350 /* SEEK */
1370 if (dev_priv->chipset == 0x50) 1351 if (device->chipset == 0x50)
1371 xf_emit(ctx, 5, 0); 1352 xf_emit(ctx, 5, 0);
1372 else if (!IS_NVA3F(dev_priv->chipset)) 1353 else if (!IS_NVA3F(device->chipset))
1373 xf_emit(ctx, 6, 0); 1354 xf_emit(ctx, 6, 0);
1374 else 1355 else
1375 xf_emit(ctx, 4, 0); 1356 xf_emit(ctx, 4, 0);
1376 /* SEEK */ 1357 /* SEEK */
1377 /* the PGRAPH's internal FIFO */ 1358 /* the PGRAPH's internal FIFO */
1378 if (dev_priv->chipset == 0x50) 1359 if (device->chipset == 0x50)
1379 xf_emit(ctx, 8*3, 0); 1360 xf_emit(ctx, 8*3, 0);
1380 else 1361 else
1381 xf_emit(ctx, 0x100*3, 0); 1362 xf_emit(ctx, 0x100*3, 0);
1382 /* and another bonus slot?!? */ 1363 /* and another bonus slot?!? */
1383 xf_emit(ctx, 3, 0); 1364 xf_emit(ctx, 3, 0);
1384 /* and YET ANOTHER bonus slot? */ 1365 /* and YET ANOTHER bonus slot? */
1385 if (IS_NVA3F(dev_priv->chipset)) 1366 if (IS_NVA3F(device->chipset))
1386 xf_emit(ctx, 3, 0); 1367 xf_emit(ctx, 3, 0);
1387 /* SEEK */ 1368 /* SEEK */
1388 /* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */ 1369 /* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */
@@ -1394,7 +1375,7 @@ nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
1394 /* SEEK */ 1375 /* SEEK */
1395 xf_emit(ctx, 9, 0); 1376 xf_emit(ctx, 9, 0);
1396 /* SEEK */ 1377 /* SEEK */
1397 if (dev_priv->chipset < 0x90) 1378 if (device->chipset < 0x90)
1398 xf_emit(ctx, 4, 0); 1379 xf_emit(ctx, 4, 0);
1399 /* SEEK */ 1380 /* SEEK */
1400 xf_emit(ctx, 2, 0); 1381 xf_emit(ctx, 2, 0);
@@ -1407,9 +1388,9 @@ nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
1407 xf_emit(ctx, 6*2, 0); 1388 xf_emit(ctx, 6*2, 0);
1408 xf_emit(ctx, 2, 0); 1389 xf_emit(ctx, 2, 0);
1409 /* SEEK */ 1390 /* SEEK */
1410 if (dev_priv->chipset == 0x50) 1391 if (device->chipset == 0x50)
1411 xf_emit(ctx, 0x1c, 0); 1392 xf_emit(ctx, 0x1c, 0);
1412 else if (dev_priv->chipset < 0xa0) 1393 else if (device->chipset < 0xa0)
1413 xf_emit(ctx, 0x1e, 0); 1394 xf_emit(ctx, 0x1e, 0);
1414 else 1395 else
1415 xf_emit(ctx, 0x22, 0); 1396 xf_emit(ctx, 0x22, 0);
@@ -1421,9 +1402,9 @@ static void
1421nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx) 1402nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
1422{ 1403{
1423 /* Strand 0, right after dispatch */ 1404 /* Strand 0, right after dispatch */
1424 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1405 struct nouveau_device *device = ctx->device;
1425 int smallm2mf = 0; 1406 int smallm2mf = 0;
1426 if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98) 1407 if (device->chipset < 0x92 || device->chipset == 0x98)
1427 smallm2mf = 1; 1408 smallm2mf = 1;
1428 /* SEEK */ 1409 /* SEEK */
1429 xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */ 1410 xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
@@ -1472,10 +1453,10 @@ nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
1472static void 1453static void
1473nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx) 1454nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
1474{ 1455{
1475 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1456 struct nouveau_device *device = ctx->device;
1476 xf_emit(ctx, 2, 0); /* RO */ 1457 xf_emit(ctx, 2, 0); /* RO */
1477 xf_emit(ctx, 0x800, 0); /* ffffffff */ 1458 xf_emit(ctx, 0x800, 0); /* ffffffff */
1478 switch (dev_priv->chipset) { 1459 switch (device->chipset) {
1479 case 0x50: 1460 case 0x50:
1480 case 0x92: 1461 case 0x92:
1481 case 0xa0: 1462 case 0xa0:
@@ -1540,7 +1521,7 @@ nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
1540static void 1521static void
1541nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx) 1522nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
1542{ 1523{
1543 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1524 struct nouveau_device *device = ctx->device;
1544 int i; 1525 int i;
1545 /* end of area 2 on pre-NVA0, area 1 on NVAx */ 1526 /* end of area 2 on pre-NVA0, area 1 on NVAx */
1546 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ 1527 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
@@ -1550,14 +1531,14 @@ nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
1550 xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ 1531 xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
1551 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ 1532 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
1552 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ 1533 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
1553 if (dev_priv->chipset == 0x50) 1534 if (device->chipset == 0x50)
1554 xf_emit(ctx, 1, 0x3ff); 1535 xf_emit(ctx, 1, 0x3ff);
1555 else 1536 else
1556 xf_emit(ctx, 1, 0x7ff); /* 000007ff */ 1537 xf_emit(ctx, 1, 0x7ff); /* 000007ff */
1557 xf_emit(ctx, 1, 0); /* 111/113 */ 1538 xf_emit(ctx, 1, 0); /* 111/113 */
1558 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ 1539 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
1559 for (i = 0; i < 8; i++) { 1540 for (i = 0; i < 8; i++) {
1560 switch (dev_priv->chipset) { 1541 switch (device->chipset) {
1561 case 0x50: 1542 case 0x50:
1562 case 0x86: 1543 case 0x86:
1563 case 0x98: 1544 case 0x98:
@@ -1600,7 +1581,7 @@ nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
1600static void 1581static void
1601nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx) 1582nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
1602{ 1583{
1603 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1584 struct nouveau_device *device = ctx->device;
1604 /* end of area 2 on pre-NVA0, area 1 on NVAx */ 1585 /* end of area 2 on pre-NVA0, area 1 on NVAx */
1605 xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */ 1586 xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
1606 xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */ 1587 xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */
@@ -1614,9 +1595,9 @@ nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
1614 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ 1595 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
1615 xf_emit(ctx, 1, 0); /* 00000007 */ 1596 xf_emit(ctx, 1, 0); /* 00000007 */
1616 xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */ 1597 xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */
1617 if (dev_priv->chipset >= 0xa0) 1598 if (device->chipset >= 0xa0)
1618 xf_emit(ctx, 1, 0x0fac6881); 1599 xf_emit(ctx, 1, 0x0fac6881);
1619 if (IS_NVA3F(dev_priv->chipset)) { 1600 if (IS_NVA3F(device->chipset)) {
1620 xf_emit(ctx, 1, 1); 1601 xf_emit(ctx, 1, 1);
1621 xf_emit(ctx, 3, 0); 1602 xf_emit(ctx, 3, 0);
1622 } 1603 }
@@ -1625,9 +1606,9 @@ nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
1625static void 1606static void
1626nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx) 1607nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
1627{ 1608{
1628 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1609 struct nouveau_device *device = ctx->device;
1629 /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */ 1610 /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
1630 if (dev_priv->chipset != 0x50) { 1611 if (device->chipset != 0x50) {
1631 xf_emit(ctx, 5, 0); /* ffffffff */ 1612 xf_emit(ctx, 5, 0); /* ffffffff */
1632 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ 1613 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
1633 xf_emit(ctx, 1, 0); /* 00000001 */ 1614 xf_emit(ctx, 1, 0); /* 00000001 */
@@ -1643,14 +1624,14 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
1643 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ 1624 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
1644 xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ 1625 xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
1645 xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */ 1626 xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */
1646 if (dev_priv->chipset != 0x50) 1627 if (device->chipset != 0x50)
1647 xf_emit(ctx, 1, 0); /* 3ff */ 1628 xf_emit(ctx, 1, 0); /* 3ff */
1648 xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */ 1629 xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */
1649 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */ 1630 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */
1650 xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */ 1631 xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */
1651 xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */ 1632 xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
1652 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ 1633 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
1653 if (dev_priv->chipset != 0x50) 1634 if (device->chipset != 0x50)
1654 xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */ 1635 xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */
1655 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ 1636 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
1656 xf_emit(ctx, 1, 1); /* 00000001 SHADE_MODEL */ 1637 xf_emit(ctx, 1, 1); /* 00000001 SHADE_MODEL */
@@ -1669,7 +1650,7 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
1669 xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */ 1650 xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */
1670 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ 1651 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
1671 xf_emit(ctx, 1, 0); /* 0000000f */ 1652 xf_emit(ctx, 1, 0); /* 0000000f */
1672 if (dev_priv->chipset == 0x50) 1653 if (device->chipset == 0x50)
1673 xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */ 1654 xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
1674 else 1655 else
1675 xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */ 1656 xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
@@ -1704,11 +1685,11 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
1704 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ 1685 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
1705 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ 1686 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
1706 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ 1687 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
1707 if (IS_NVA3F(dev_priv->chipset)) 1688 if (IS_NVA3F(device->chipset))
1708 xf_emit(ctx, 1, 0); /* 00000001 */ 1689 xf_emit(ctx, 1, 0); /* 00000001 */
1709 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ 1690 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
1710 xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */ 1691 xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
1711 if (dev_priv->chipset != 0x50) { 1692 if (device->chipset != 0x50) {
1712 xf_emit(ctx, 1, 0); /* ffffffff */ 1693 xf_emit(ctx, 1, 0); /* ffffffff */
1713 xf_emit(ctx, 1, 0); /* 00000001 */ 1694 xf_emit(ctx, 1, 0); /* 00000001 */
1714 xf_emit(ctx, 1, 0); /* 000003ff */ 1695 xf_emit(ctx, 1, 0); /* 000003ff */
@@ -1736,7 +1717,7 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
1736static void 1717static void
1737nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx) 1718nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
1738{ 1719{
1739 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1720 struct nouveau_device *device = ctx->device;
1740 /* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */ 1721 /* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
1741 /* SEEK */ 1722 /* SEEK */
1742 xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */ 1723 xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
@@ -1774,7 +1755,7 @@ nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
1774 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ 1755 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
1775 xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ 1756 xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
1776 xf_emit(ctx, 1, 0); /* 00000007 */ 1757 xf_emit(ctx, 1, 0); /* 00000007 */
1777 if (dev_priv->chipset != 0x50) 1758 if (device->chipset != 0x50)
1778 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */ 1759 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */
1779 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ 1760 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
1780 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ 1761 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
@@ -1789,7 +1770,7 @@ nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
1789 xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */ 1770 xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
1790 xf_emit(ctx, 1, 3); /* 00000003 FP_CTRL_UNK196C */ 1771 xf_emit(ctx, 1, 3); /* 00000003 FP_CTRL_UNK196C */
1791 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */ 1772 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */
1792 if (dev_priv->chipset != 0x50) 1773 if (device->chipset != 0x50)
1793 xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */ 1774 xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */
1794 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */ 1775 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */
1795} 1776}
@@ -1817,7 +1798,7 @@ nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
1817static void 1798static void
1818nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx) 1799nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
1819{ 1800{
1820 struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; 1801 struct nouveau_device *device = ctx->device;
1821 int i; 1802 int i;
1822 /* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */ 1803 /* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
1823 /* SEEK */ 1804 /* SEEK */
@@ -1829,7 +1810,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 4, 0); /* RO */
 		xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
 		xf_emit(ctx, 1, 0); /* 1ff */
@@ -1860,7 +1841,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
 	/* SEEK */
 	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
@@ -1869,7 +1850,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
 	xf_emit(ctx, 1, 1); /* 00000001 */
 	/* SEEK */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 2, 4); /* 000000ff */
 	xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
 	xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
@@ -1893,20 +1874,20 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 0x10, 0); /* 00ffffff POINT_COORD_REPLACE_MAP */
 	xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
 	xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 0); /* 000003ff */
 }
 
 static void
 nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int acnt = 0x10, rep, i;
 	/* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		acnt = 0x20;
 	/* SEEK */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */
 		xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */
 	}
@@ -1923,9 +1904,9 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */
 	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0xb, 0); /* RO */
-	else if (dev_priv->chipset >= 0xa0)
+	else if (device->chipset >= 0xa0)
 		xf_emit(ctx, 0x9, 0); /* RO */
 	else
 		xf_emit(ctx, 0x8, 0); /* RO */
@@ -1944,11 +1925,11 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */
 	xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */
 	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */
 	else
 		xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */
-	if (dev_priv->chipset == 0xa8)
+	if (device->chipset == 0xa8)
 		xf_emit(ctx, 1, 0x1e00); /* 7fff */
 	/* SEEK */
 	xf_emit(ctx, 0xc, 0); /* RO or close */
@@ -1956,13 +1937,13 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
 	xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
 	xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
-	if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0)
+	if (device->chipset > 0x50 && device->chipset < 0xa0)
 		xf_emit(ctx, 2, 0); /* ffffffff */
 	else
 		xf_emit(ctx, 1, 0); /* ffffffff */
 	xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 0x10, 0); /* 0? */
 		xf_emit(ctx, 2, 0); /* weird... */
 		xf_emit(ctx, 2, 0); /* RO */
@@ -1975,7 +1956,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */
 	xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */
 	xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? */
 	/* SEEK */
 	xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */
@@ -2013,23 +1994,23 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */
 	xf_emit(ctx, 3, 0); /* f/1f */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, acnt, 0); /* f */
 		xf_emit(ctx, 3, 0); /* f/1f */
 	}
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 2, 0); /* RO */
 	else
 		xf_emit(ctx, 5, 0); /* RO */
 	/* SEEK */
 	xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */
 	/* SEEK */
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		xf_emit(ctx, 0x41, 0); /* RO */
 		/* SEEK */
 		xf_emit(ctx, 0x11, 0); /* RO */
-	} else if (!IS_NVA3F(dev_priv->chipset))
+	} else if (!IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0x50, 0); /* RO */
 	else
 		xf_emit(ctx, 0x58, 0); /* RO */
@@ -2041,7 +2022,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */
 	xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0x1d, 0); /* RO */
 	else
 		xf_emit(ctx, 0x16, 0); /* RO */
@@ -2049,21 +2030,21 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
 	xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
 	/* SEEK */
-	if (dev_priv->chipset < 0xa0)
+	if (device->chipset < 0xa0)
 		xf_emit(ctx, 8, 0); /* RO */
-	else if (IS_NVA3F(dev_priv->chipset))
+	else if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0xc, 0); /* RO */
 	else
 		xf_emit(ctx, 7, 0); /* RO */
 	/* SEEK */
 	xf_emit(ctx, 0xa, 0); /* RO */
-	if (dev_priv->chipset == 0xa0)
+	if (device->chipset == 0xa0)
 		rep = 0xc;
 	else
 		rep = 4;
 	for (i = 0; i < rep; i++) {
 		/* SEEK */
-		if (IS_NVA3F(dev_priv->chipset))
+		if (IS_NVA3F(device->chipset))
 			xf_emit(ctx, 0x20, 0); /* ffffffff */
 		xf_emit(ctx, 0x200, 0); /* ffffffff */
 		xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */
@@ -2077,7 +2058,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
 	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
 	/* SEEK */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 7, 0); /* weird... */
 	else
 		xf_emit(ctx, 5, 0); /* weird... */
@@ -2086,13 +2067,13 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
 	/* SEEK */
 	xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */
 	xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */
 	xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		/* this is useless on everything but the original NV50,
 		 * guess they forgot to nuke it. Or just didn't bother. */
 		xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */
@@ -2148,7 +2129,7 @@ nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
 	/* SEEK */
 	xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
@@ -2173,7 +2154,7 @@ nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
 	/* SEEK */
 	xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 	case 0x92:
 		xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
@@ -2247,7 +2228,7 @@ nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
 	xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
 	xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
@@ -2277,9 +2258,9 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
 	xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
 	xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */
-	else if (dev_priv->chipset >= 0xa0)
+	else if (device->chipset >= 0xa0)
 		xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */
 	xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
 	xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
@@ -2293,11 +2274,11 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */
 	xf_emit(ctx, 1, 0); /* 00000001 */
 	xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		xf_emit(ctx, 1, 0); /* 3ff */
 		xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */
 	}
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
 	xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
 	xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
@@ -2316,11 +2297,11 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
 	xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
 	xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
-	if (dev_priv->chipset < 0xa0)
+	if (device->chipset < 0xa0)
 		xf_emit(ctx, 0x1c, 0); /* RO */
-	else if (IS_NVA3F(dev_priv->chipset))
+	else if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0x9, 0);
 	xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
 	xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
@@ -2328,13 +2309,13 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
 	xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
 	xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
 		xf_emit(ctx, 1, 0); /* 3ff */
 	}
 	/* XXX: the following block could belong either to unk1cxx, or
 	 * to STRMOUT. Rather hard to tell. */
-	if (dev_priv->chipset < 0xa0)
+	if (device->chipset < 0xa0)
 		xf_emit(ctx, 0x25, 0);
 	else
 		xf_emit(ctx, 0x3b, 0);
@@ -2343,18 +2324,18 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
 	xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
 	xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
 		xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
 	}
 	xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
 	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
 	else
 		xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
@@ -2365,7 +2346,7 @@ nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */
 	xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */
 	xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
 		xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
 	}
@@ -2385,12 +2366,12 @@ nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
 	xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
 	xf_emit(ctx, 1, 0); /* 00000007 */
 	xf_emit(ctx, 1, 0); /* 000003ff */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
 	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
 }
@@ -2398,7 +2379,7 @@ nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	/* SEEK */
 	xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
 	xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
@@ -2416,7 +2397,7 @@ nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */
 	xf_emit(ctx, 1, 0); /* ff/3ff */
 	xf_emit(ctx, 1, 0); /* 00000007 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
 	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
 }
@@ -2424,11 +2405,11 @@ nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int magic2;
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		magic2 = 0x00003e60;
-	} else if (!IS_NVA3F(dev_priv->chipset)) {
+	} else if (!IS_NVA3F(device->chipset)) {
 		magic2 = 0x001ffe67;
 	} else {
 		magic2 = 0x00087e67;
@@ -2446,14 +2427,14 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
 	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
 	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
 	xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
 	xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
 	xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
 	xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
 	xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
-	if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset))
+	if (device->chipset >= 0xa0 && !IS_NVAAF(device->chipset))
 		xf_emit(ctx, 1, 0x15); /* 000000ff */
 	xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
 	xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
@@ -2462,14 +2443,14 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
 	xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
 	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
-	if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
+	if (device->chipset == 0x86 || device->chipset == 0x92 || device->chipset == 0x98 || device->chipset >= 0xa0) {
 		xf_emit(ctx, 3, 0); /* ff, ffffffff, ffffffff */
 		xf_emit(ctx, 1, 4); /* 7 */
 		xf_emit(ctx, 1, 0x400); /* fffffff */
 		xf_emit(ctx, 1, 0x300); /* ffff */
 		xf_emit(ctx, 1, 0x1001); /* 1fff */
-		if (dev_priv->chipset != 0xa0) {
-			if (IS_NVA3F(dev_priv->chipset))
+		if (device->chipset != 0xa0) {
+			if (IS_NVA3F(device->chipset))
 				xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */
 			else
 				xf_emit(ctx, 1, 0x15); /* ff */
@@ -2547,7 +2528,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
 	xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
 	xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 2, 0);
 		xf_emit(ctx, 1, 0x1001);
 		xf_emit(ctx, 0xb, 0);
@@ -2564,7 +2545,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
 	xf_emit(ctx, 1, 0x11); /* 3f/7f */
 	xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
-	if (dev_priv->chipset != 0x50) {
+	if (device->chipset != 0x50) {
 		xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
 		xf_emit(ctx, 1, 0); /* 000000ff */
 	}
@@ -2581,7 +2562,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
 	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
 	xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */
 		xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
 		xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
@@ -2600,7 +2581,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
 		xf_emit(ctx, 1, 0); /* 00000001 */
 		xf_emit(ctx, 1, 0); /* 000003ff */
-	} else if (dev_priv->chipset >= 0xa0) {
+	} else if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 2, 0); /* 00000001 */
 		xf_emit(ctx, 1, 0); /* 00000007 */
 		xf_emit(ctx, 1, 0); /* 00000003 */
@@ -2614,7 +2595,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */
 	xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */
 	xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 2, 0); /* 00000001 */
 	xf_emit(ctx, 1, 0); /* 000003ff */
 	xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
@@ -2628,9 +2609,9 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
 	xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
 	xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
 		xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
 		xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
@@ -2659,9 +2640,9 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int magic3;
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x50:
 		magic3 = 0x1000;
 		break;
@@ -2681,16 +2662,16 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
 	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
 	xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0x1f, 0); /* ffffffff */
-	else if (dev_priv->chipset >= 0xa0)
+	else if (device->chipset >= 0xa0)
 		xf_emit(ctx, 0x0f, 0); /* ffffffff */
 	else
 		xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */
 	xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
 	xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
 	xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 1, 0x03020100); /* ffffffff */
 	else
 		xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */
@@ -2733,11 +2714,11 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
 	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
 	xf_emit(ctx, 1, 0); /* 111/113 */
-	if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
+	if (device->chipset == 0x94 || device->chipset == 0x96)
 		xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
-	else if (dev_priv->chipset < 0xa0)
+	else if (device->chipset < 0xa0)
 		xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
-	else if (!IS_NVA3F(dev_priv->chipset))
+	else if (!IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0x210, 0); /* ffffffff */
 	else
 		xf_emit(ctx, 0x410, 0); /* ffffffff */
@@ -2751,12 +2732,12 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int magic1, magic2;
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		magic1 = 0x3ff;
 		magic2 = 0x00003e60;
-	} else if (!IS_NVA3F(dev_priv->chipset)) {
+	} else if (!IS_NVA3F(device->chipset)) {
 		magic1 = 0x7ff;
 		magic2 = 0x001ffe67;
 	} else {
@@ -2766,7 +2747,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
 	xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */
 	xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */
 	xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
 	xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
@@ -2800,11 +2781,11 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
 	xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
 	xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
 		xf_emit(ctx, 1, 0); /* 00000003 */
 		xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */
-	} else if (dev_priv->chipset >= 0xa0) {
+	} else if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */
 		xf_emit(ctx, 1, 0); /* 00000003 */
 	} else {
@@ -2818,7 +2799,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
 	xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
 	xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
 		xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
 		xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
@@ -2846,7 +2827,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
 	xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */
 	xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
 	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
 	xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
@@ -2870,9 +2851,9 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
 	xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
 	xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 1, 0); /* ff */
 	else
 		xf_emit(ctx, 3, 0); /* 1, 7, 3ff */
@@ -2907,7 +2888,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
 	xf_emit(ctx, 1, 0); /* 00000007 */
 	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
 	xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */
 	xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */
@@ -2945,7 +2926,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
 	xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
 	xf_emit(ctx, 1, 0); /* 00000007 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
 	xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
 	xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
@@ -2974,7 +2955,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
 	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
 	xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 0); /* 00000001 */
 	xf_emit(ctx, 1, 0); /* ffff0ff3 */
 	xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
@@ -2988,14 +2969,14 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
 	xf_emit(ctx, 1, 0); /* 7 */
 	xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
 		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
 	}
 	xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
 	xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
 	xf_emit(ctx, 1, 0); /* ffff0ff3 */
-	if (dev_priv->chipset >= 0xa0)
+	if (device->chipset >= 0xa0)
 		xf_emit(ctx, 1, 0x0fac6881); /* fffffff */
 	xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
 	xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
@@ -3012,12 +2993,12 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
 	xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
 	xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
 		xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */
 	}
 	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */
 		xf_emit(ctx, 1, 0xfac6881); /* fffffff */
 		xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */
@@ -3027,7 +3008,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 2, 0); /* 7, f */
 	xf_emit(ctx, 1, 1); /* 1 */
 	xf_emit(ctx, 1, 0); /* 7/f */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 0x9, 0); /* 1 */
 	else
 		xf_emit(ctx, 0x8, 0); /* 1 */
@@ -3041,7 +3022,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0x11); /* 7f */
 	xf_emit(ctx, 1, 1); /* 1 */
 	xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
 		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
 	}
@@ -3051,15 +3032,15 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */
-	if (dev_priv->chipset != 0x50)
+	if (device->chipset != 0x50)
 		xf_emit(ctx, 1, 0); /* 3 */
 	xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */
 	xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */
 	xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */
 	xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */
 	else
 		xf_emit(ctx, 2, 0); /* 3ff, 1 */
@@ -3071,13 +3052,13 @@ nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */
 	xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */
 	xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */
-	if (dev_priv->chipset == 0x50) {
+	if (device->chipset == 0x50) {
 		xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */
 		xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
 		xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */
 		xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */
 		xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */
-	} else if (!IS_NVAAF(dev_priv->chipset)) {
+	} else if (!IS_NVAAF(device->chipset)) {
 		xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
 		xf_emit(ctx, 1, 0); /* 00000003 */
 		xf_emit(ctx, 1, 0); /* 000003ff */
@@ -3097,7 +3078,7 @@ nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
 	xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
 	xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */
@@ -3109,7 +3090,7 @@ nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
 	xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
 	xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
 	xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
 	xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
@@ -3136,8 +3117,8 @@ nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-	if (dev_priv->chipset < 0xa0) {
+	struct nouveau_device *device = ctx->device;
+	if (device->chipset < 0xa0) {
 		nv50_graph_construct_xfer_unk84xx(ctx);
 		nv50_graph_construct_xfer_tprop(ctx);
 		nv50_graph_construct_xfer_tex(ctx);
@@ -3153,9 +3134,9 @@ nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i, mpcnt = 2;
-	switch (dev_priv->chipset) {
+	switch (device->chipset) {
 	case 0x98:
 	case 0xaa:
 		mpcnt = 1;
@@ -3182,34 +3163,34 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 		xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */
 		xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */
 		xf_emit(ctx, 1, 0x04000400); /* ffffffff */
-		if (dev_priv->chipset >= 0xa0)
+		if (device->chipset >= 0xa0)
 			xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */
 		xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */
 		xf_emit(ctx, 1, 0); /* ff/3ff */
 		xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
-		if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) {
+		if (device->chipset == 0x86 || device->chipset == 0x98 || device->chipset == 0xa8 || IS_NVAAF(device->chipset)) {
 			xf_emit(ctx, 1, 0xe00); /* 7fff */
 			xf_emit(ctx, 1, 0x1e00); /* 7fff */
 		}
 		xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */
 		xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
 		xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
-		if (dev_priv->chipset == 0x50)
+		if (device->chipset == 0x50)
 			xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */
 		xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */
 		xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
 		xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
 		xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
-		if (IS_NVAAF(dev_priv->chipset))
+		if (IS_NVAAF(device->chipset))
 			xf_emit(ctx, 0xb, 0); /* RO */
-		else if (dev_priv->chipset >= 0xa0)
+		else if (device->chipset >= 0xa0)
 			xf_emit(ctx, 0xc, 0); /* RO */
 		else
 			xf_emit(ctx, 0xa, 0); /* RO */
 	}
 	xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
 	xf_emit(ctx, 1, 0); /* ff/3ff */
-	if (dev_priv->chipset >= 0xa0) {
+	if (device->chipset >= 0xa0) {
 		xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */
 	}
 	xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */
@@ -3223,7 +3204,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
 	xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNk0FAC */
 	xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
 	xf_emit(ctx, 1, 0); /* ff/3ff */
 	xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */
@@ -3238,7 +3219,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0); /* 00000007 */
 	xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */
 	xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
-	if (IS_NVA3F(dev_priv->chipset))
+	if (IS_NVA3F(device->chipset))
 		xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
 	xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
 	xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
@@ -3253,7 +3234,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
 	xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
 	xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
-	if (IS_NVA3F(dev_priv->chipset)) {
+	if (IS_NVA3F(device->chipset)) {
 		xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
 		xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
 		xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
@@ -3268,11 +3249,11 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK0F90 */
 	xf_emit(ctx, 1, 4);		/* 000000ff FP_RESULT_COUNT */
 	/* XXX: demagic this part some day */
-	if (dev_priv->chipset == 0x50)
+	if (device->chipset == 0x50)
 		xf_emit(ctx, 0x3a0, 0);
-	else if (dev_priv->chipset < 0x94)
+	else if (device->chipset < 0x94)
 		xf_emit(ctx, 0x3a2, 0);
-	else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
+	else if (device->chipset == 0x98 || device->chipset == 0xaa)
 		xf_emit(ctx, 0x39f, 0);
 	else
 		xf_emit(ctx, 0x3a3, 0);
@@ -3285,15 +3266,15 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
 static void
 nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
 {
-	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+	struct nouveau_device *device = ctx->device;
 	int i;
-	uint32_t offset;
-	uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+	u32 offset;
+	u32 units = nv_rd32 (ctx->device, 0x1540);
 	int size = 0;
 
 	offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
 
-	if (dev_priv->chipset < 0xa0) {
+	if (device->chipset < 0xa0) {
 		for (i = 0; i < 8; i++) {
 			ctx->ctxvals_pos = offset + i;
 			/* that little bugger belongs to csched. No idea
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
new file mode 100644
index 000000000000..0b7951a85943
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -0,0 +1,3039 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26
27void
28nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data)
29{
30 nv_wr32(priv, 0x400204, data);
31 nv_wr32(priv, 0x400200, icmd);
32 while (nv_rd32(priv, 0x400700) & 2) {}
33}
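
For reference, nv_icmd() above is a plain fire-and-poll helper: the payload is latched through 0x400204, the command is kicked via 0x400200, and the caller spins while bit 1 of 0x400700 reports the ICMD unit busy. A minimal sketch of the same pattern with a bounded spin (ICMD_SPIN_LIMIT and the function name are hypothetical, not part of this patch):

	/* Sketch only: same write/write/poll sequence as nv_icmd(), but with
	 * a bounded spin so a wedged ICMD unit cannot hang the caller. */
	static int
	nv_icmd_bounded(struct nvc0_graph_priv *priv, u32 icmd, u32 data)
	{
		int spin = ICMD_SPIN_LIMIT;	/* hypothetical bound */

		nv_wr32(priv, 0x400204, data);	/* payload first */
		nv_wr32(priv, 0x400200, icmd);	/* then the command kick */
		while (nv_rd32(priv, 0x400700) & 2) {	/* ICMD busy */
			if (--spin == 0)
				return -ETIMEDOUT;
		}
		return 0;
	}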
34
35int
36nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
37{
38 struct nouveau_bar *bar = nouveau_bar(priv);
39 struct nouveau_object *parent = nv_object(priv);
40 struct nouveau_gpuobj *chan;
41 u32 size = (0x80000 + priv->size + 4095) & ~4095;
42 int ret, i;
43
44	/* allocate memory for a "channel", which we'll use to generate
45 * the default context values
46 */
47 ret = nouveau_gpuobj_new(parent, NULL, size, 0x1000,
48 NVOBJ_FLAG_ZERO_ALLOC, &info->chan);
49 chan = info->chan;
50 if (ret) {
51 nv_error(priv, "failed to allocate channel memory, %d\n", ret);
52 return ret;
53 }
54
55 /* PGD pointer */
56 nv_wo32(chan, 0x0200, lower_32_bits(chan->addr + 0x1000));
57 nv_wo32(chan, 0x0204, upper_32_bits(chan->addr + 0x1000));
58 nv_wo32(chan, 0x0208, 0xffffffff);
59 nv_wo32(chan, 0x020c, 0x000000ff);
60
61 /* PGT[0] pointer */
62 nv_wo32(chan, 0x1000, 0x00000000);
63 nv_wo32(chan, 0x1004, 0x00000001 | (chan->addr + 0x2000) >> 8);
64
65 /* identity-map the whole "channel" into its own vm */
66 for (i = 0; i < size / 4096; i++) {
67 u64 addr = ((chan->addr + (i * 4096)) >> 8) | 1;
68 nv_wo32(chan, 0x2000 + (i * 8), lower_32_bits(addr));
69 nv_wo32(chan, 0x2004 + (i * 8), upper_32_bits(addr));
70 }
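
The identity-map loop above builds small-page PTEs by hand: each 4 KiB page's bus address is shifted right by 8 and bit 0 is set as the present bit, then the result is stored low word first. A worked instance of that encoding (illustration only; helper name and address invented):

	/* e.g. a page at bus address 0x12345000:
	 *   pte = (0x12345000 >> 8) | 1 = 0x00123451
	 * written via lower_32_bits()/upper_32_bits() (the high word is 0 here).
	 */
	static inline u64 nvc0_pte_sketch(u64 bus_addr)
	{
		return (bus_addr >> 8) | 1;
	}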
71
72 /* context pointer (virt) */
73 nv_wo32(chan, 0x0210, 0x00080004);
74 nv_wo32(chan, 0x0214, 0x00000000);
75
76 bar->flush(bar);
77
78 nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8);
79 nv_wr32(priv, 0x100cbc, 0x80000001);
80 nv_wait(priv, 0x100c80, 0x00008000, 0x00008000);
81
82 /* setup default state for mmio list construction */
83 info->data = priv->mmio_data;
84 info->mmio = priv->mmio_list;
85 info->addr = 0x2000 + (i * 8);
86 info->priv = priv;
87 info->buffer_nr = 0;
88
89 if (priv->firmware) {
90 nv_wr32(priv, 0x409840, 0x00000030);
91 nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
92 nv_wr32(priv, 0x409504, 0x00000003);
93 if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010))
94 nv_error(priv, "load_ctx timeout\n");
95
96 nv_wo32(chan, 0x8001c, 1);
97 nv_wo32(chan, 0x80020, 0);
98 nv_wo32(chan, 0x80028, 0);
99 nv_wo32(chan, 0x8002c, 0);
100 bar->flush(bar);
101 return 0;
102 }
103
104 /* HUB_FUC(SET_CHAN) */
105 nv_wr32(priv, 0x409840, 0x80000000);
106 nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
107 nv_wr32(priv, 0x409504, 0x00000001);
108 if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
109 nv_error(priv, "HUB_SET_CHAN timeout\n");
110 nvc0_graph_ctxctl_debug(priv);
111 nouveau_gpuobj_ref(NULL, &info->chan);
112 return -EBUSY;
113 }
114
115 return 0;
116}
117
118void
119nvc0_grctx_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access)
120{
121 info->buffer[info->buffer_nr] = info->addr;
122 info->buffer[info->buffer_nr] += (align - 1);
123 info->buffer[info->buffer_nr] &= ~(align - 1);
124 info->addr = info->buffer[info->buffer_nr++] + size;
125
126 info->data->size = size;
127 info->data->align = align;
128 info->data->access = access;
129 info->data++;
130}
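
nvc0_grctx_data() above reserves a buffer by rounding the running address up with the usual power-of-two align-up idiom, (addr + align - 1) & ~(align - 1), then recording (size, align, access) for later replay. The idiom in isolation (sketch only; the helper name is invented):

	/* align must be a power of two; e.g. align_up(0x1234, 0x100) == 0x1300 */
	static inline u32 align_up(u32 addr, u32 align)
	{
		return (addr + (align - 1)) & ~(align - 1);
	}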
131
132void
133nvc0_grctx_mmio(struct nvc0_grctx *info, u32 addr, u32 data, u32 shift, u32 buf)
134{
135 struct nvc0_graph_priv *priv = info->priv;
136
137 info->mmio->addr = addr;
138 info->mmio->data = data;
139 info->mmio->shift = shift;
140 info->mmio->buffer = buf;
141 info->mmio++;
142
143 if (shift)
144 data |= info->buffer[buf] >> shift;
145 nv_wr32(priv, addr, data);
146}
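
The shift handling in nvc0_grctx_mmio() above is what ties a register write to one of the buffers reserved by nvc0_grctx_data(): a non-zero shift means the buffer's address, right-shifted, is OR'd into the stored value before the write, and the saved (addr, data, shift, buffer) tuple lets the same patch be replayed if the buffer moves. Numerically (values invented for illustration):

	/* with info->buffer[buf] == 0x00140000 and shift == 8:
	 *   written = 0x80000000 | (0x00140000 >> 8) = 0x80001400
	 */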
147
148int
149nvc0_grctx_fini(struct nvc0_grctx *info)
150{
151 struct nvc0_graph_priv *priv = info->priv;
152 int i;
153
154 /* trigger a context unload by unsetting the "next channel valid" bit
155 * and faking a context switch interrupt
156 */
157 nv_mask(priv, 0x409b04, 0x80000000, 0x00000000);
158 nv_wr32(priv, 0x409000, 0x00000100);
159 if (!nv_wait(priv, 0x409b00, 0x80000000, 0x00000000)) {
160 nv_error(priv, "grctx template channel unload timeout\n");
161 return -EBUSY;
162 }
163
164 priv->data = kmalloc(priv->size, GFP_KERNEL);
165 if (priv->data) {
166 for (i = 0; i < priv->size; i += 4)
167 priv->data[i / 4] = nv_ro32(info->chan, 0x80000 + i);
168 }
169
170 nouveau_gpuobj_ref(NULL, &info->chan);
171 return priv->data ? 0 : -ENOMEM;
172}
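
Taken together, the helpers above form a capture sequence: nvc0_grctx_init() fakes up a channel and binds it, the generate step lays out buffers and register state, and nvc0_grctx_fini() unloads the channel and snapshots the image into priv->data. A hedged sketch of a caller (the real caller lives elsewhere in this series; error handling abbreviated):

	struct nvc0_grctx info;
	int ret;

	ret = nvc0_grctx_init(priv, &info);
	if (ret)
		return ret;
	/* ... nvc0_grctx_data()/nvc0_grctx_mmio() calls to build state ... */
	ret = nvc0_grctx_fini(&info);	/* copies the image to priv->data */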
173
174static void
175nvc0_grctx_generate_9097(struct nvc0_graph_priv *priv)
176{
177 u32 fermi = nvc0_graph_class(priv);
178 u32 mthd;
179
180 nv_mthd(priv, 0x9097, 0x0800, 0x00000000);
181 nv_mthd(priv, 0x9097, 0x0840, 0x00000000);
182 nv_mthd(priv, 0x9097, 0x0880, 0x00000000);
183 nv_mthd(priv, 0x9097, 0x08c0, 0x00000000);
184 nv_mthd(priv, 0x9097, 0x0900, 0x00000000);
185 nv_mthd(priv, 0x9097, 0x0940, 0x00000000);
186 nv_mthd(priv, 0x9097, 0x0980, 0x00000000);
187 nv_mthd(priv, 0x9097, 0x09c0, 0x00000000);
188 nv_mthd(priv, 0x9097, 0x0804, 0x00000000);
189 nv_mthd(priv, 0x9097, 0x0844, 0x00000000);
190 nv_mthd(priv, 0x9097, 0x0884, 0x00000000);
191 nv_mthd(priv, 0x9097, 0x08c4, 0x00000000);
192 nv_mthd(priv, 0x9097, 0x0904, 0x00000000);
193 nv_mthd(priv, 0x9097, 0x0944, 0x00000000);
194 nv_mthd(priv, 0x9097, 0x0984, 0x00000000);
195 nv_mthd(priv, 0x9097, 0x09c4, 0x00000000);
196 nv_mthd(priv, 0x9097, 0x0808, 0x00000400);
197 nv_mthd(priv, 0x9097, 0x0848, 0x00000400);
198 nv_mthd(priv, 0x9097, 0x0888, 0x00000400);
199 nv_mthd(priv, 0x9097, 0x08c8, 0x00000400);
200 nv_mthd(priv, 0x9097, 0x0908, 0x00000400);
201 nv_mthd(priv, 0x9097, 0x0948, 0x00000400);
202 nv_mthd(priv, 0x9097, 0x0988, 0x00000400);
203 nv_mthd(priv, 0x9097, 0x09c8, 0x00000400);
204 nv_mthd(priv, 0x9097, 0x080c, 0x00000300);
205 nv_mthd(priv, 0x9097, 0x084c, 0x00000300);
206 nv_mthd(priv, 0x9097, 0x088c, 0x00000300);
207 nv_mthd(priv, 0x9097, 0x08cc, 0x00000300);
208 nv_mthd(priv, 0x9097, 0x090c, 0x00000300);
209 nv_mthd(priv, 0x9097, 0x094c, 0x00000300);
210 nv_mthd(priv, 0x9097, 0x098c, 0x00000300);
211 nv_mthd(priv, 0x9097, 0x09cc, 0x00000300);
212 nv_mthd(priv, 0x9097, 0x0810, 0x000000cf);
213 nv_mthd(priv, 0x9097, 0x0850, 0x00000000);
214 nv_mthd(priv, 0x9097, 0x0890, 0x00000000);
215 nv_mthd(priv, 0x9097, 0x08d0, 0x00000000);
216 nv_mthd(priv, 0x9097, 0x0910, 0x00000000);
217 nv_mthd(priv, 0x9097, 0x0950, 0x00000000);
218 nv_mthd(priv, 0x9097, 0x0990, 0x00000000);
219 nv_mthd(priv, 0x9097, 0x09d0, 0x00000000);
220 nv_mthd(priv, 0x9097, 0x0814, 0x00000040);
221 nv_mthd(priv, 0x9097, 0x0854, 0x00000040);
222 nv_mthd(priv, 0x9097, 0x0894, 0x00000040);
223 nv_mthd(priv, 0x9097, 0x08d4, 0x00000040);
224 nv_mthd(priv, 0x9097, 0x0914, 0x00000040);
225 nv_mthd(priv, 0x9097, 0x0954, 0x00000040);
226 nv_mthd(priv, 0x9097, 0x0994, 0x00000040);
227 nv_mthd(priv, 0x9097, 0x09d4, 0x00000040);
228 nv_mthd(priv, 0x9097, 0x0818, 0x00000001);
229 nv_mthd(priv, 0x9097, 0x0858, 0x00000001);
230 nv_mthd(priv, 0x9097, 0x0898, 0x00000001);
231 nv_mthd(priv, 0x9097, 0x08d8, 0x00000001);
232 nv_mthd(priv, 0x9097, 0x0918, 0x00000001);
233 nv_mthd(priv, 0x9097, 0x0958, 0x00000001);
234 nv_mthd(priv, 0x9097, 0x0998, 0x00000001);
235 nv_mthd(priv, 0x9097, 0x09d8, 0x00000001);
236 nv_mthd(priv, 0x9097, 0x081c, 0x00000000);
237 nv_mthd(priv, 0x9097, 0x085c, 0x00000000);
238 nv_mthd(priv, 0x9097, 0x089c, 0x00000000);
239 nv_mthd(priv, 0x9097, 0x08dc, 0x00000000);
240 nv_mthd(priv, 0x9097, 0x091c, 0x00000000);
241 nv_mthd(priv, 0x9097, 0x095c, 0x00000000);
242 nv_mthd(priv, 0x9097, 0x099c, 0x00000000);
243 nv_mthd(priv, 0x9097, 0x09dc, 0x00000000);
244 nv_mthd(priv, 0x9097, 0x0820, 0x00000000);
245 nv_mthd(priv, 0x9097, 0x0860, 0x00000000);
246 nv_mthd(priv, 0x9097, 0x08a0, 0x00000000);
247 nv_mthd(priv, 0x9097, 0x08e0, 0x00000000);
248 nv_mthd(priv, 0x9097, 0x0920, 0x00000000);
249 nv_mthd(priv, 0x9097, 0x0960, 0x00000000);
250 nv_mthd(priv, 0x9097, 0x09a0, 0x00000000);
251 nv_mthd(priv, 0x9097, 0x09e0, 0x00000000);
252 nv_mthd(priv, 0x9097, 0x2700, 0x00000000);
253 nv_mthd(priv, 0x9097, 0x2720, 0x00000000);
254 nv_mthd(priv, 0x9097, 0x2740, 0x00000000);
255 nv_mthd(priv, 0x9097, 0x2760, 0x00000000);
256 nv_mthd(priv, 0x9097, 0x2780, 0x00000000);
257 nv_mthd(priv, 0x9097, 0x27a0, 0x00000000);
258 nv_mthd(priv, 0x9097, 0x27c0, 0x00000000);
259 nv_mthd(priv, 0x9097, 0x27e0, 0x00000000);
260 nv_mthd(priv, 0x9097, 0x2704, 0x00000000);
261 nv_mthd(priv, 0x9097, 0x2724, 0x00000000);
262 nv_mthd(priv, 0x9097, 0x2744, 0x00000000);
263 nv_mthd(priv, 0x9097, 0x2764, 0x00000000);
264 nv_mthd(priv, 0x9097, 0x2784, 0x00000000);
265 nv_mthd(priv, 0x9097, 0x27a4, 0x00000000);
266 nv_mthd(priv, 0x9097, 0x27c4, 0x00000000);
267 nv_mthd(priv, 0x9097, 0x27e4, 0x00000000);
268 nv_mthd(priv, 0x9097, 0x2708, 0x00000000);
269 nv_mthd(priv, 0x9097, 0x2728, 0x00000000);
270 nv_mthd(priv, 0x9097, 0x2748, 0x00000000);
271 nv_mthd(priv, 0x9097, 0x2768, 0x00000000);
272 nv_mthd(priv, 0x9097, 0x2788, 0x00000000);
273 nv_mthd(priv, 0x9097, 0x27a8, 0x00000000);
274 nv_mthd(priv, 0x9097, 0x27c8, 0x00000000);
275 nv_mthd(priv, 0x9097, 0x27e8, 0x00000000);
276 nv_mthd(priv, 0x9097, 0x270c, 0x00000000);
277 nv_mthd(priv, 0x9097, 0x272c, 0x00000000);
278 nv_mthd(priv, 0x9097, 0x274c, 0x00000000);
279 nv_mthd(priv, 0x9097, 0x276c, 0x00000000);
280 nv_mthd(priv, 0x9097, 0x278c, 0x00000000);
281 nv_mthd(priv, 0x9097, 0x27ac, 0x00000000);
282 nv_mthd(priv, 0x9097, 0x27cc, 0x00000000);
283 nv_mthd(priv, 0x9097, 0x27ec, 0x00000000);
284 nv_mthd(priv, 0x9097, 0x2710, 0x00014000);
285 nv_mthd(priv, 0x9097, 0x2730, 0x00014000);
286 nv_mthd(priv, 0x9097, 0x2750, 0x00014000);
287 nv_mthd(priv, 0x9097, 0x2770, 0x00014000);
288 nv_mthd(priv, 0x9097, 0x2790, 0x00014000);
289 nv_mthd(priv, 0x9097, 0x27b0, 0x00014000);
290 nv_mthd(priv, 0x9097, 0x27d0, 0x00014000);
291 nv_mthd(priv, 0x9097, 0x27f0, 0x00014000);
292 nv_mthd(priv, 0x9097, 0x2714, 0x00000040);
293 nv_mthd(priv, 0x9097, 0x2734, 0x00000040);
294 nv_mthd(priv, 0x9097, 0x2754, 0x00000040);
295 nv_mthd(priv, 0x9097, 0x2774, 0x00000040);
296 nv_mthd(priv, 0x9097, 0x2794, 0x00000040);
297 nv_mthd(priv, 0x9097, 0x27b4, 0x00000040);
298 nv_mthd(priv, 0x9097, 0x27d4, 0x00000040);
299 nv_mthd(priv, 0x9097, 0x27f4, 0x00000040);
300 nv_mthd(priv, 0x9097, 0x1c00, 0x00000000);
301 nv_mthd(priv, 0x9097, 0x1c10, 0x00000000);
302 nv_mthd(priv, 0x9097, 0x1c20, 0x00000000);
303 nv_mthd(priv, 0x9097, 0x1c30, 0x00000000);
304 nv_mthd(priv, 0x9097, 0x1c40, 0x00000000);
305 nv_mthd(priv, 0x9097, 0x1c50, 0x00000000);
306 nv_mthd(priv, 0x9097, 0x1c60, 0x00000000);
307 nv_mthd(priv, 0x9097, 0x1c70, 0x00000000);
308 nv_mthd(priv, 0x9097, 0x1c80, 0x00000000);
309 nv_mthd(priv, 0x9097, 0x1c90, 0x00000000);
310 nv_mthd(priv, 0x9097, 0x1ca0, 0x00000000);
311 nv_mthd(priv, 0x9097, 0x1cb0, 0x00000000);
312 nv_mthd(priv, 0x9097, 0x1cc0, 0x00000000);
313 nv_mthd(priv, 0x9097, 0x1cd0, 0x00000000);
314 nv_mthd(priv, 0x9097, 0x1ce0, 0x00000000);
315 nv_mthd(priv, 0x9097, 0x1cf0, 0x00000000);
316 nv_mthd(priv, 0x9097, 0x1c04, 0x00000000);
317 nv_mthd(priv, 0x9097, 0x1c14, 0x00000000);
318 nv_mthd(priv, 0x9097, 0x1c24, 0x00000000);
319 nv_mthd(priv, 0x9097, 0x1c34, 0x00000000);
320 nv_mthd(priv, 0x9097, 0x1c44, 0x00000000);
321 nv_mthd(priv, 0x9097, 0x1c54, 0x00000000);
322 nv_mthd(priv, 0x9097, 0x1c64, 0x00000000);
323 nv_mthd(priv, 0x9097, 0x1c74, 0x00000000);
324 nv_mthd(priv, 0x9097, 0x1c84, 0x00000000);
325 nv_mthd(priv, 0x9097, 0x1c94, 0x00000000);
326 nv_mthd(priv, 0x9097, 0x1ca4, 0x00000000);
327 nv_mthd(priv, 0x9097, 0x1cb4, 0x00000000);
328 nv_mthd(priv, 0x9097, 0x1cc4, 0x00000000);
329 nv_mthd(priv, 0x9097, 0x1cd4, 0x00000000);
330 nv_mthd(priv, 0x9097, 0x1ce4, 0x00000000);
331 nv_mthd(priv, 0x9097, 0x1cf4, 0x00000000);
332 nv_mthd(priv, 0x9097, 0x1c08, 0x00000000);
333 nv_mthd(priv, 0x9097, 0x1c18, 0x00000000);
334 nv_mthd(priv, 0x9097, 0x1c28, 0x00000000);
335 nv_mthd(priv, 0x9097, 0x1c38, 0x00000000);
336 nv_mthd(priv, 0x9097, 0x1c48, 0x00000000);
337 nv_mthd(priv, 0x9097, 0x1c58, 0x00000000);
338 nv_mthd(priv, 0x9097, 0x1c68, 0x00000000);
339 nv_mthd(priv, 0x9097, 0x1c78, 0x00000000);
340 nv_mthd(priv, 0x9097, 0x1c88, 0x00000000);
341 nv_mthd(priv, 0x9097, 0x1c98, 0x00000000);
342 nv_mthd(priv, 0x9097, 0x1ca8, 0x00000000);
343 nv_mthd(priv, 0x9097, 0x1cb8, 0x00000000);
344 nv_mthd(priv, 0x9097, 0x1cc8, 0x00000000);
345 nv_mthd(priv, 0x9097, 0x1cd8, 0x00000000);
346 nv_mthd(priv, 0x9097, 0x1ce8, 0x00000000);
347 nv_mthd(priv, 0x9097, 0x1cf8, 0x00000000);
348 nv_mthd(priv, 0x9097, 0x1c0c, 0x00000000);
349 nv_mthd(priv, 0x9097, 0x1c1c, 0x00000000);
350 nv_mthd(priv, 0x9097, 0x1c2c, 0x00000000);
351 nv_mthd(priv, 0x9097, 0x1c3c, 0x00000000);
352 nv_mthd(priv, 0x9097, 0x1c4c, 0x00000000);
353 nv_mthd(priv, 0x9097, 0x1c5c, 0x00000000);
354 nv_mthd(priv, 0x9097, 0x1c6c, 0x00000000);
355 nv_mthd(priv, 0x9097, 0x1c7c, 0x00000000);
356 nv_mthd(priv, 0x9097, 0x1c8c, 0x00000000);
357 nv_mthd(priv, 0x9097, 0x1c9c, 0x00000000);
358 nv_mthd(priv, 0x9097, 0x1cac, 0x00000000);
359 nv_mthd(priv, 0x9097, 0x1cbc, 0x00000000);
360 nv_mthd(priv, 0x9097, 0x1ccc, 0x00000000);
361 nv_mthd(priv, 0x9097, 0x1cdc, 0x00000000);
362 nv_mthd(priv, 0x9097, 0x1cec, 0x00000000);
363 nv_mthd(priv, 0x9097, 0x1cfc, 0x00000000);
364 nv_mthd(priv, 0x9097, 0x1d00, 0x00000000);
365 nv_mthd(priv, 0x9097, 0x1d10, 0x00000000);
366 nv_mthd(priv, 0x9097, 0x1d20, 0x00000000);
367 nv_mthd(priv, 0x9097, 0x1d30, 0x00000000);
368 nv_mthd(priv, 0x9097, 0x1d40, 0x00000000);
369 nv_mthd(priv, 0x9097, 0x1d50, 0x00000000);
370 nv_mthd(priv, 0x9097, 0x1d60, 0x00000000);
371 nv_mthd(priv, 0x9097, 0x1d70, 0x00000000);
372 nv_mthd(priv, 0x9097, 0x1d80, 0x00000000);
373 nv_mthd(priv, 0x9097, 0x1d90, 0x00000000);
374 nv_mthd(priv, 0x9097, 0x1da0, 0x00000000);
375 nv_mthd(priv, 0x9097, 0x1db0, 0x00000000);
376 nv_mthd(priv, 0x9097, 0x1dc0, 0x00000000);
377 nv_mthd(priv, 0x9097, 0x1dd0, 0x00000000);
378 nv_mthd(priv, 0x9097, 0x1de0, 0x00000000);
379 nv_mthd(priv, 0x9097, 0x1df0, 0x00000000);
380 nv_mthd(priv, 0x9097, 0x1d04, 0x00000000);
381 nv_mthd(priv, 0x9097, 0x1d14, 0x00000000);
382 nv_mthd(priv, 0x9097, 0x1d24, 0x00000000);
383 nv_mthd(priv, 0x9097, 0x1d34, 0x00000000);
384 nv_mthd(priv, 0x9097, 0x1d44, 0x00000000);
385 nv_mthd(priv, 0x9097, 0x1d54, 0x00000000);
386 nv_mthd(priv, 0x9097, 0x1d64, 0x00000000);
387 nv_mthd(priv, 0x9097, 0x1d74, 0x00000000);
388 nv_mthd(priv, 0x9097, 0x1d84, 0x00000000);
389 nv_mthd(priv, 0x9097, 0x1d94, 0x00000000);
390 nv_mthd(priv, 0x9097, 0x1da4, 0x00000000);
391 nv_mthd(priv, 0x9097, 0x1db4, 0x00000000);
392 nv_mthd(priv, 0x9097, 0x1dc4, 0x00000000);
393 nv_mthd(priv, 0x9097, 0x1dd4, 0x00000000);
394 nv_mthd(priv, 0x9097, 0x1de4, 0x00000000);
395 nv_mthd(priv, 0x9097, 0x1df4, 0x00000000);
396 nv_mthd(priv, 0x9097, 0x1d08, 0x00000000);
397 nv_mthd(priv, 0x9097, 0x1d18, 0x00000000);
398 nv_mthd(priv, 0x9097, 0x1d28, 0x00000000);
399 nv_mthd(priv, 0x9097, 0x1d38, 0x00000000);
400 nv_mthd(priv, 0x9097, 0x1d48, 0x00000000);
401 nv_mthd(priv, 0x9097, 0x1d58, 0x00000000);
402 nv_mthd(priv, 0x9097, 0x1d68, 0x00000000);
403 nv_mthd(priv, 0x9097, 0x1d78, 0x00000000);
404 nv_mthd(priv, 0x9097, 0x1d88, 0x00000000);
405 nv_mthd(priv, 0x9097, 0x1d98, 0x00000000);
406 nv_mthd(priv, 0x9097, 0x1da8, 0x00000000);
407 nv_mthd(priv, 0x9097, 0x1db8, 0x00000000);
408 nv_mthd(priv, 0x9097, 0x1dc8, 0x00000000);
409 nv_mthd(priv, 0x9097, 0x1dd8, 0x00000000);
410 nv_mthd(priv, 0x9097, 0x1de8, 0x00000000);
411 nv_mthd(priv, 0x9097, 0x1df8, 0x00000000);
412 nv_mthd(priv, 0x9097, 0x1d0c, 0x00000000);
413 nv_mthd(priv, 0x9097, 0x1d1c, 0x00000000);
414 nv_mthd(priv, 0x9097, 0x1d2c, 0x00000000);
415 nv_mthd(priv, 0x9097, 0x1d3c, 0x00000000);
416 nv_mthd(priv, 0x9097, 0x1d4c, 0x00000000);
417 nv_mthd(priv, 0x9097, 0x1d5c, 0x00000000);
418 nv_mthd(priv, 0x9097, 0x1d6c, 0x00000000);
419 nv_mthd(priv, 0x9097, 0x1d7c, 0x00000000);
420 nv_mthd(priv, 0x9097, 0x1d8c, 0x00000000);
421 nv_mthd(priv, 0x9097, 0x1d9c, 0x00000000);
422 nv_mthd(priv, 0x9097, 0x1dac, 0x00000000);
423 nv_mthd(priv, 0x9097, 0x1dbc, 0x00000000);
424 nv_mthd(priv, 0x9097, 0x1dcc, 0x00000000);
425 nv_mthd(priv, 0x9097, 0x1ddc, 0x00000000);
426 nv_mthd(priv, 0x9097, 0x1dec, 0x00000000);
427 nv_mthd(priv, 0x9097, 0x1dfc, 0x00000000);
428 nv_mthd(priv, 0x9097, 0x1f00, 0x00000000);
429 nv_mthd(priv, 0x9097, 0x1f08, 0x00000000);
430 nv_mthd(priv, 0x9097, 0x1f10, 0x00000000);
431 nv_mthd(priv, 0x9097, 0x1f18, 0x00000000);
432 nv_mthd(priv, 0x9097, 0x1f20, 0x00000000);
433 nv_mthd(priv, 0x9097, 0x1f28, 0x00000000);
434 nv_mthd(priv, 0x9097, 0x1f30, 0x00000000);
435 nv_mthd(priv, 0x9097, 0x1f38, 0x00000000);
436 nv_mthd(priv, 0x9097, 0x1f40, 0x00000000);
437 nv_mthd(priv, 0x9097, 0x1f48, 0x00000000);
438 nv_mthd(priv, 0x9097, 0x1f50, 0x00000000);
439 nv_mthd(priv, 0x9097, 0x1f58, 0x00000000);
440 nv_mthd(priv, 0x9097, 0x1f60, 0x00000000);
441 nv_mthd(priv, 0x9097, 0x1f68, 0x00000000);
442 nv_mthd(priv, 0x9097, 0x1f70, 0x00000000);
443 nv_mthd(priv, 0x9097, 0x1f78, 0x00000000);
444 nv_mthd(priv, 0x9097, 0x1f04, 0x00000000);
445 nv_mthd(priv, 0x9097, 0x1f0c, 0x00000000);
446 nv_mthd(priv, 0x9097, 0x1f14, 0x00000000);
447 nv_mthd(priv, 0x9097, 0x1f1c, 0x00000000);
448 nv_mthd(priv, 0x9097, 0x1f24, 0x00000000);
449 nv_mthd(priv, 0x9097, 0x1f2c, 0x00000000);
450 nv_mthd(priv, 0x9097, 0x1f34, 0x00000000);
451 nv_mthd(priv, 0x9097, 0x1f3c, 0x00000000);
452 nv_mthd(priv, 0x9097, 0x1f44, 0x00000000);
453 nv_mthd(priv, 0x9097, 0x1f4c, 0x00000000);
454 nv_mthd(priv, 0x9097, 0x1f54, 0x00000000);
455 nv_mthd(priv, 0x9097, 0x1f5c, 0x00000000);
456 nv_mthd(priv, 0x9097, 0x1f64, 0x00000000);
457 nv_mthd(priv, 0x9097, 0x1f6c, 0x00000000);
458 nv_mthd(priv, 0x9097, 0x1f74, 0x00000000);
459 nv_mthd(priv, 0x9097, 0x1f7c, 0x00000000);
460 nv_mthd(priv, 0x9097, 0x1f80, 0x00000000);
461 nv_mthd(priv, 0x9097, 0x1f88, 0x00000000);
462 nv_mthd(priv, 0x9097, 0x1f90, 0x00000000);
463 nv_mthd(priv, 0x9097, 0x1f98, 0x00000000);
464 nv_mthd(priv, 0x9097, 0x1fa0, 0x00000000);
465 nv_mthd(priv, 0x9097, 0x1fa8, 0x00000000);
466 nv_mthd(priv, 0x9097, 0x1fb0, 0x00000000);
467 nv_mthd(priv, 0x9097, 0x1fb8, 0x00000000);
468 nv_mthd(priv, 0x9097, 0x1fc0, 0x00000000);
469 nv_mthd(priv, 0x9097, 0x1fc8, 0x00000000);
470 nv_mthd(priv, 0x9097, 0x1fd0, 0x00000000);
471 nv_mthd(priv, 0x9097, 0x1fd8, 0x00000000);
472 nv_mthd(priv, 0x9097, 0x1fe0, 0x00000000);
473 nv_mthd(priv, 0x9097, 0x1fe8, 0x00000000);
474 nv_mthd(priv, 0x9097, 0x1ff0, 0x00000000);
475 nv_mthd(priv, 0x9097, 0x1ff8, 0x00000000);
476 nv_mthd(priv, 0x9097, 0x1f84, 0x00000000);
477 nv_mthd(priv, 0x9097, 0x1f8c, 0x00000000);
478 nv_mthd(priv, 0x9097, 0x1f94, 0x00000000);
479 nv_mthd(priv, 0x9097, 0x1f9c, 0x00000000);
480 nv_mthd(priv, 0x9097, 0x1fa4, 0x00000000);
481 nv_mthd(priv, 0x9097, 0x1fac, 0x00000000);
482 nv_mthd(priv, 0x9097, 0x1fb4, 0x00000000);
483 nv_mthd(priv, 0x9097, 0x1fbc, 0x00000000);
484 nv_mthd(priv, 0x9097, 0x1fc4, 0x00000000);
485 nv_mthd(priv, 0x9097, 0x1fcc, 0x00000000);
486 nv_mthd(priv, 0x9097, 0x1fd4, 0x00000000);
487 nv_mthd(priv, 0x9097, 0x1fdc, 0x00000000);
488 nv_mthd(priv, 0x9097, 0x1fe4, 0x00000000);
489 nv_mthd(priv, 0x9097, 0x1fec, 0x00000000);
490 nv_mthd(priv, 0x9097, 0x1ff4, 0x00000000);
491 nv_mthd(priv, 0x9097, 0x1ffc, 0x00000000);
492 nv_mthd(priv, 0x9097, 0x2200, 0x00000022);
493 nv_mthd(priv, 0x9097, 0x2210, 0x00000022);
494 nv_mthd(priv, 0x9097, 0x2220, 0x00000022);
495 nv_mthd(priv, 0x9097, 0x2230, 0x00000022);
496 nv_mthd(priv, 0x9097, 0x2240, 0x00000022);
497 nv_mthd(priv, 0x9097, 0x2000, 0x00000000);
498 nv_mthd(priv, 0x9097, 0x2040, 0x00000011);
499 nv_mthd(priv, 0x9097, 0x2080, 0x00000020);
500 nv_mthd(priv, 0x9097, 0x20c0, 0x00000030);
501 nv_mthd(priv, 0x9097, 0x2100, 0x00000040);
502 nv_mthd(priv, 0x9097, 0x2140, 0x00000051);
503 nv_mthd(priv, 0x9097, 0x200c, 0x00000001);
504 nv_mthd(priv, 0x9097, 0x204c, 0x00000001);
505 nv_mthd(priv, 0x9097, 0x208c, 0x00000001);
506 nv_mthd(priv, 0x9097, 0x20cc, 0x00000001);
507 nv_mthd(priv, 0x9097, 0x210c, 0x00000001);
508 nv_mthd(priv, 0x9097, 0x214c, 0x00000001);
509 nv_mthd(priv, 0x9097, 0x2010, 0x00000000);
510 nv_mthd(priv, 0x9097, 0x2050, 0x00000000);
511 nv_mthd(priv, 0x9097, 0x2090, 0x00000001);
512 nv_mthd(priv, 0x9097, 0x20d0, 0x00000002);
513 nv_mthd(priv, 0x9097, 0x2110, 0x00000003);
514 nv_mthd(priv, 0x9097, 0x2150, 0x00000004);
515 nv_mthd(priv, 0x9097, 0x0380, 0x00000000);
516 nv_mthd(priv, 0x9097, 0x03a0, 0x00000000);
517 nv_mthd(priv, 0x9097, 0x03c0, 0x00000000);
518 nv_mthd(priv, 0x9097, 0x03e0, 0x00000000);
519 nv_mthd(priv, 0x9097, 0x0384, 0x00000000);
520 nv_mthd(priv, 0x9097, 0x03a4, 0x00000000);
521 nv_mthd(priv, 0x9097, 0x03c4, 0x00000000);
522 nv_mthd(priv, 0x9097, 0x03e4, 0x00000000);
523 nv_mthd(priv, 0x9097, 0x0388, 0x00000000);
524 nv_mthd(priv, 0x9097, 0x03a8, 0x00000000);
525 nv_mthd(priv, 0x9097, 0x03c8, 0x00000000);
526 nv_mthd(priv, 0x9097, 0x03e8, 0x00000000);
527 nv_mthd(priv, 0x9097, 0x038c, 0x00000000);
528 nv_mthd(priv, 0x9097, 0x03ac, 0x00000000);
529 nv_mthd(priv, 0x9097, 0x03cc, 0x00000000);
530 nv_mthd(priv, 0x9097, 0x03ec, 0x00000000);
531 nv_mthd(priv, 0x9097, 0x0700, 0x00000000);
532 nv_mthd(priv, 0x9097, 0x0710, 0x00000000);
533 nv_mthd(priv, 0x9097, 0x0720, 0x00000000);
534 nv_mthd(priv, 0x9097, 0x0730, 0x00000000);
535 nv_mthd(priv, 0x9097, 0x0704, 0x00000000);
536 nv_mthd(priv, 0x9097, 0x0714, 0x00000000);
537 nv_mthd(priv, 0x9097, 0x0724, 0x00000000);
538 nv_mthd(priv, 0x9097, 0x0734, 0x00000000);
539 nv_mthd(priv, 0x9097, 0x0708, 0x00000000);
540 nv_mthd(priv, 0x9097, 0x0718, 0x00000000);
541 nv_mthd(priv, 0x9097, 0x0728, 0x00000000);
542 nv_mthd(priv, 0x9097, 0x0738, 0x00000000);
543 nv_mthd(priv, 0x9097, 0x2800, 0x00000000);
544 nv_mthd(priv, 0x9097, 0x2804, 0x00000000);
545 nv_mthd(priv, 0x9097, 0x2808, 0x00000000);
546 nv_mthd(priv, 0x9097, 0x280c, 0x00000000);
547 nv_mthd(priv, 0x9097, 0x2810, 0x00000000);
548 nv_mthd(priv, 0x9097, 0x2814, 0x00000000);
549 nv_mthd(priv, 0x9097, 0x2818, 0x00000000);
550 nv_mthd(priv, 0x9097, 0x281c, 0x00000000);
551 nv_mthd(priv, 0x9097, 0x2820, 0x00000000);
552 nv_mthd(priv, 0x9097, 0x2824, 0x00000000);
553 nv_mthd(priv, 0x9097, 0x2828, 0x00000000);
554 nv_mthd(priv, 0x9097, 0x282c, 0x00000000);
555 nv_mthd(priv, 0x9097, 0x2830, 0x00000000);
556 nv_mthd(priv, 0x9097, 0x2834, 0x00000000);
557 nv_mthd(priv, 0x9097, 0x2838, 0x00000000);
558 nv_mthd(priv, 0x9097, 0x283c, 0x00000000);
559 nv_mthd(priv, 0x9097, 0x2840, 0x00000000);
560 nv_mthd(priv, 0x9097, 0x2844, 0x00000000);
561 nv_mthd(priv, 0x9097, 0x2848, 0x00000000);
562 nv_mthd(priv, 0x9097, 0x284c, 0x00000000);
563 nv_mthd(priv, 0x9097, 0x2850, 0x00000000);
564 nv_mthd(priv, 0x9097, 0x2854, 0x00000000);
565 nv_mthd(priv, 0x9097, 0x2858, 0x00000000);
566 nv_mthd(priv, 0x9097, 0x285c, 0x00000000);
567 nv_mthd(priv, 0x9097, 0x2860, 0x00000000);
568 nv_mthd(priv, 0x9097, 0x2864, 0x00000000);
569 nv_mthd(priv, 0x9097, 0x2868, 0x00000000);
570 nv_mthd(priv, 0x9097, 0x286c, 0x00000000);
571 nv_mthd(priv, 0x9097, 0x2870, 0x00000000);
572 nv_mthd(priv, 0x9097, 0x2874, 0x00000000);
573 nv_mthd(priv, 0x9097, 0x2878, 0x00000000);
574 nv_mthd(priv, 0x9097, 0x287c, 0x00000000);
575 nv_mthd(priv, 0x9097, 0x2880, 0x00000000);
576 nv_mthd(priv, 0x9097, 0x2884, 0x00000000);
577 nv_mthd(priv, 0x9097, 0x2888, 0x00000000);
578 nv_mthd(priv, 0x9097, 0x288c, 0x00000000);
579 nv_mthd(priv, 0x9097, 0x2890, 0x00000000);
580 nv_mthd(priv, 0x9097, 0x2894, 0x00000000);
581 nv_mthd(priv, 0x9097, 0x2898, 0x00000000);
582 nv_mthd(priv, 0x9097, 0x289c, 0x00000000);
583 nv_mthd(priv, 0x9097, 0x28a0, 0x00000000);
584 nv_mthd(priv, 0x9097, 0x28a4, 0x00000000);
585 nv_mthd(priv, 0x9097, 0x28a8, 0x00000000);
586 nv_mthd(priv, 0x9097, 0x28ac, 0x00000000);
587 nv_mthd(priv, 0x9097, 0x28b0, 0x00000000);
588 nv_mthd(priv, 0x9097, 0x28b4, 0x00000000);
589 nv_mthd(priv, 0x9097, 0x28b8, 0x00000000);
590 nv_mthd(priv, 0x9097, 0x28bc, 0x00000000);
591 nv_mthd(priv, 0x9097, 0x28c0, 0x00000000);
592 nv_mthd(priv, 0x9097, 0x28c4, 0x00000000);
593 nv_mthd(priv, 0x9097, 0x28c8, 0x00000000);
594 nv_mthd(priv, 0x9097, 0x28cc, 0x00000000);
595 nv_mthd(priv, 0x9097, 0x28d0, 0x00000000);
596 nv_mthd(priv, 0x9097, 0x28d4, 0x00000000);
597 nv_mthd(priv, 0x9097, 0x28d8, 0x00000000);
598 nv_mthd(priv, 0x9097, 0x28dc, 0x00000000);
599 nv_mthd(priv, 0x9097, 0x28e0, 0x00000000);
600 nv_mthd(priv, 0x9097, 0x28e4, 0x00000000);
601 nv_mthd(priv, 0x9097, 0x28e8, 0x00000000);
602 nv_mthd(priv, 0x9097, 0x28ec, 0x00000000);
603 nv_mthd(priv, 0x9097, 0x28f0, 0x00000000);
604 nv_mthd(priv, 0x9097, 0x28f4, 0x00000000);
605 nv_mthd(priv, 0x9097, 0x28f8, 0x00000000);
606 nv_mthd(priv, 0x9097, 0x28fc, 0x00000000);
607 nv_mthd(priv, 0x9097, 0x2900, 0x00000000);
608 nv_mthd(priv, 0x9097, 0x2904, 0x00000000);
609 nv_mthd(priv, 0x9097, 0x2908, 0x00000000);
610 nv_mthd(priv, 0x9097, 0x290c, 0x00000000);
611 nv_mthd(priv, 0x9097, 0x2910, 0x00000000);
612 nv_mthd(priv, 0x9097, 0x2914, 0x00000000);
613 nv_mthd(priv, 0x9097, 0x2918, 0x00000000);
614 nv_mthd(priv, 0x9097, 0x291c, 0x00000000);
615 nv_mthd(priv, 0x9097, 0x2920, 0x00000000);
616 nv_mthd(priv, 0x9097, 0x2924, 0x00000000);
617 nv_mthd(priv, 0x9097, 0x2928, 0x00000000);
618 nv_mthd(priv, 0x9097, 0x292c, 0x00000000);
619 nv_mthd(priv, 0x9097, 0x2930, 0x00000000);
620 nv_mthd(priv, 0x9097, 0x2934, 0x00000000);
621 nv_mthd(priv, 0x9097, 0x2938, 0x00000000);
622 nv_mthd(priv, 0x9097, 0x293c, 0x00000000);
623 nv_mthd(priv, 0x9097, 0x2940, 0x00000000);
624 nv_mthd(priv, 0x9097, 0x2944, 0x00000000);
625 nv_mthd(priv, 0x9097, 0x2948, 0x00000000);
626 nv_mthd(priv, 0x9097, 0x294c, 0x00000000);
627 nv_mthd(priv, 0x9097, 0x2950, 0x00000000);
628 nv_mthd(priv, 0x9097, 0x2954, 0x00000000);
629 nv_mthd(priv, 0x9097, 0x2958, 0x00000000);
630 nv_mthd(priv, 0x9097, 0x295c, 0x00000000);
631 nv_mthd(priv, 0x9097, 0x2960, 0x00000000);
632 nv_mthd(priv, 0x9097, 0x2964, 0x00000000);
633 nv_mthd(priv, 0x9097, 0x2968, 0x00000000);
634 nv_mthd(priv, 0x9097, 0x296c, 0x00000000);
635 nv_mthd(priv, 0x9097, 0x2970, 0x00000000);
636 nv_mthd(priv, 0x9097, 0x2974, 0x00000000);
637 nv_mthd(priv, 0x9097, 0x2978, 0x00000000);
638 nv_mthd(priv, 0x9097, 0x297c, 0x00000000);
639 nv_mthd(priv, 0x9097, 0x2980, 0x00000000);
640 nv_mthd(priv, 0x9097, 0x2984, 0x00000000);
641 nv_mthd(priv, 0x9097, 0x2988, 0x00000000);
642 nv_mthd(priv, 0x9097, 0x298c, 0x00000000);
643 nv_mthd(priv, 0x9097, 0x2990, 0x00000000);
644 nv_mthd(priv, 0x9097, 0x2994, 0x00000000);
645 nv_mthd(priv, 0x9097, 0x2998, 0x00000000);
646 nv_mthd(priv, 0x9097, 0x299c, 0x00000000);
647 nv_mthd(priv, 0x9097, 0x29a0, 0x00000000);
648 nv_mthd(priv, 0x9097, 0x29a4, 0x00000000);
649 nv_mthd(priv, 0x9097, 0x29a8, 0x00000000);
650 nv_mthd(priv, 0x9097, 0x29ac, 0x00000000);
651 nv_mthd(priv, 0x9097, 0x29b0, 0x00000000);
652 nv_mthd(priv, 0x9097, 0x29b4, 0x00000000);
653 nv_mthd(priv, 0x9097, 0x29b8, 0x00000000);
654 nv_mthd(priv, 0x9097, 0x29bc, 0x00000000);
655 nv_mthd(priv, 0x9097, 0x29c0, 0x00000000);
656 nv_mthd(priv, 0x9097, 0x29c4, 0x00000000);
657 nv_mthd(priv, 0x9097, 0x29c8, 0x00000000);
658 nv_mthd(priv, 0x9097, 0x29cc, 0x00000000);
659 nv_mthd(priv, 0x9097, 0x29d0, 0x00000000);
660 nv_mthd(priv, 0x9097, 0x29d4, 0x00000000);
661 nv_mthd(priv, 0x9097, 0x29d8, 0x00000000);
662 nv_mthd(priv, 0x9097, 0x29dc, 0x00000000);
663 nv_mthd(priv, 0x9097, 0x29e0, 0x00000000);
664 nv_mthd(priv, 0x9097, 0x29e4, 0x00000000);
665 nv_mthd(priv, 0x9097, 0x29e8, 0x00000000);
666 nv_mthd(priv, 0x9097, 0x29ec, 0x00000000);
667 nv_mthd(priv, 0x9097, 0x29f0, 0x00000000);
668 nv_mthd(priv, 0x9097, 0x29f4, 0x00000000);
669 nv_mthd(priv, 0x9097, 0x29f8, 0x00000000);
670 nv_mthd(priv, 0x9097, 0x29fc, 0x00000000);
671 nv_mthd(priv, 0x9097, 0x0a00, 0x00000000);
672 nv_mthd(priv, 0x9097, 0x0a20, 0x00000000);
673 nv_mthd(priv, 0x9097, 0x0a40, 0x00000000);
674 nv_mthd(priv, 0x9097, 0x0a60, 0x00000000);
675 nv_mthd(priv, 0x9097, 0x0a80, 0x00000000);
676 nv_mthd(priv, 0x9097, 0x0aa0, 0x00000000);
677 nv_mthd(priv, 0x9097, 0x0ac0, 0x00000000);
678 nv_mthd(priv, 0x9097, 0x0ae0, 0x00000000);
679 nv_mthd(priv, 0x9097, 0x0b00, 0x00000000);
680 nv_mthd(priv, 0x9097, 0x0b20, 0x00000000);
681 nv_mthd(priv, 0x9097, 0x0b40, 0x00000000);
682 nv_mthd(priv, 0x9097, 0x0b60, 0x00000000);
683 nv_mthd(priv, 0x9097, 0x0b80, 0x00000000);
684 nv_mthd(priv, 0x9097, 0x0ba0, 0x00000000);
685 nv_mthd(priv, 0x9097, 0x0bc0, 0x00000000);
686 nv_mthd(priv, 0x9097, 0x0be0, 0x00000000);
687 nv_mthd(priv, 0x9097, 0x0a04, 0x00000000);
688 nv_mthd(priv, 0x9097, 0x0a24, 0x00000000);
689 nv_mthd(priv, 0x9097, 0x0a44, 0x00000000);
690 nv_mthd(priv, 0x9097, 0x0a64, 0x00000000);
691 nv_mthd(priv, 0x9097, 0x0a84, 0x00000000);
692 nv_mthd(priv, 0x9097, 0x0aa4, 0x00000000);
693 nv_mthd(priv, 0x9097, 0x0ac4, 0x00000000);
694 nv_mthd(priv, 0x9097, 0x0ae4, 0x00000000);
695 nv_mthd(priv, 0x9097, 0x0b04, 0x00000000);
696 nv_mthd(priv, 0x9097, 0x0b24, 0x00000000);
697 nv_mthd(priv, 0x9097, 0x0b44, 0x00000000);
698 nv_mthd(priv, 0x9097, 0x0b64, 0x00000000);
699 nv_mthd(priv, 0x9097, 0x0b84, 0x00000000);
700 nv_mthd(priv, 0x9097, 0x0ba4, 0x00000000);
701 nv_mthd(priv, 0x9097, 0x0bc4, 0x00000000);
702 nv_mthd(priv, 0x9097, 0x0be4, 0x00000000);
703 nv_mthd(priv, 0x9097, 0x0a08, 0x00000000);
704 nv_mthd(priv, 0x9097, 0x0a28, 0x00000000);
705 nv_mthd(priv, 0x9097, 0x0a48, 0x00000000);
706 nv_mthd(priv, 0x9097, 0x0a68, 0x00000000);
707 nv_mthd(priv, 0x9097, 0x0a88, 0x00000000);
708 nv_mthd(priv, 0x9097, 0x0aa8, 0x00000000);
709 nv_mthd(priv, 0x9097, 0x0ac8, 0x00000000);
710 nv_mthd(priv, 0x9097, 0x0ae8, 0x00000000);
711 nv_mthd(priv, 0x9097, 0x0b08, 0x00000000);
712 nv_mthd(priv, 0x9097, 0x0b28, 0x00000000);
713 nv_mthd(priv, 0x9097, 0x0b48, 0x00000000);
714 nv_mthd(priv, 0x9097, 0x0b68, 0x00000000);
715 nv_mthd(priv, 0x9097, 0x0b88, 0x00000000);
716 nv_mthd(priv, 0x9097, 0x0ba8, 0x00000000);
717 nv_mthd(priv, 0x9097, 0x0bc8, 0x00000000);
718 nv_mthd(priv, 0x9097, 0x0be8, 0x00000000);
719 nv_mthd(priv, 0x9097, 0x0a0c, 0x00000000);
720 nv_mthd(priv, 0x9097, 0x0a2c, 0x00000000);
721 nv_mthd(priv, 0x9097, 0x0a4c, 0x00000000);
722 nv_mthd(priv, 0x9097, 0x0a6c, 0x00000000);
723 nv_mthd(priv, 0x9097, 0x0a8c, 0x00000000);
724 nv_mthd(priv, 0x9097, 0x0aac, 0x00000000);
725 nv_mthd(priv, 0x9097, 0x0acc, 0x00000000);
726 nv_mthd(priv, 0x9097, 0x0aec, 0x00000000);
727 nv_mthd(priv, 0x9097, 0x0b0c, 0x00000000);
728 nv_mthd(priv, 0x9097, 0x0b2c, 0x00000000);
729 nv_mthd(priv, 0x9097, 0x0b4c, 0x00000000);
730 nv_mthd(priv, 0x9097, 0x0b6c, 0x00000000);
731 nv_mthd(priv, 0x9097, 0x0b8c, 0x00000000);
732 nv_mthd(priv, 0x9097, 0x0bac, 0x00000000);
733 nv_mthd(priv, 0x9097, 0x0bcc, 0x00000000);
734 nv_mthd(priv, 0x9097, 0x0bec, 0x00000000);
735 nv_mthd(priv, 0x9097, 0x0a10, 0x00000000);
736 nv_mthd(priv, 0x9097, 0x0a30, 0x00000000);
737 nv_mthd(priv, 0x9097, 0x0a50, 0x00000000);
738 nv_mthd(priv, 0x9097, 0x0a70, 0x00000000);
739 nv_mthd(priv, 0x9097, 0x0a90, 0x00000000);
740 nv_mthd(priv, 0x9097, 0x0ab0, 0x00000000);
741 nv_mthd(priv, 0x9097, 0x0ad0, 0x00000000);
742 nv_mthd(priv, 0x9097, 0x0af0, 0x00000000);
743 nv_mthd(priv, 0x9097, 0x0b10, 0x00000000);
744 nv_mthd(priv, 0x9097, 0x0b30, 0x00000000);
745 nv_mthd(priv, 0x9097, 0x0b50, 0x00000000);
746 nv_mthd(priv, 0x9097, 0x0b70, 0x00000000);
747 nv_mthd(priv, 0x9097, 0x0b90, 0x00000000);
748 nv_mthd(priv, 0x9097, 0x0bb0, 0x00000000);
749 nv_mthd(priv, 0x9097, 0x0bd0, 0x00000000);
750 nv_mthd(priv, 0x9097, 0x0bf0, 0x00000000);
751 nv_mthd(priv, 0x9097, 0x0a14, 0x00000000);
752 nv_mthd(priv, 0x9097, 0x0a34, 0x00000000);
753 nv_mthd(priv, 0x9097, 0x0a54, 0x00000000);
754 nv_mthd(priv, 0x9097, 0x0a74, 0x00000000);
755 nv_mthd(priv, 0x9097, 0x0a94, 0x00000000);
756 nv_mthd(priv, 0x9097, 0x0ab4, 0x00000000);
757 nv_mthd(priv, 0x9097, 0x0ad4, 0x00000000);
758 nv_mthd(priv, 0x9097, 0x0af4, 0x00000000);
759 nv_mthd(priv, 0x9097, 0x0b14, 0x00000000);
760 nv_mthd(priv, 0x9097, 0x0b34, 0x00000000);
761 nv_mthd(priv, 0x9097, 0x0b54, 0x00000000);
762 nv_mthd(priv, 0x9097, 0x0b74, 0x00000000);
763 nv_mthd(priv, 0x9097, 0x0b94, 0x00000000);
764 nv_mthd(priv, 0x9097, 0x0bb4, 0x00000000);
765 nv_mthd(priv, 0x9097, 0x0bd4, 0x00000000);
766 nv_mthd(priv, 0x9097, 0x0bf4, 0x00000000);
767 nv_mthd(priv, 0x9097, 0x0c00, 0x00000000);
768 nv_mthd(priv, 0x9097, 0x0c10, 0x00000000);
769 nv_mthd(priv, 0x9097, 0x0c20, 0x00000000);
770 nv_mthd(priv, 0x9097, 0x0c30, 0x00000000);
771 nv_mthd(priv, 0x9097, 0x0c40, 0x00000000);
772 nv_mthd(priv, 0x9097, 0x0c50, 0x00000000);
773 nv_mthd(priv, 0x9097, 0x0c60, 0x00000000);
774 nv_mthd(priv, 0x9097, 0x0c70, 0x00000000);
775 nv_mthd(priv, 0x9097, 0x0c80, 0x00000000);
776 nv_mthd(priv, 0x9097, 0x0c90, 0x00000000);
777 nv_mthd(priv, 0x9097, 0x0ca0, 0x00000000);
778 nv_mthd(priv, 0x9097, 0x0cb0, 0x00000000);
779 nv_mthd(priv, 0x9097, 0x0cc0, 0x00000000);
780 nv_mthd(priv, 0x9097, 0x0cd0, 0x00000000);
781 nv_mthd(priv, 0x9097, 0x0ce0, 0x00000000);
782 nv_mthd(priv, 0x9097, 0x0cf0, 0x00000000);
783 nv_mthd(priv, 0x9097, 0x0c04, 0x00000000);
784 nv_mthd(priv, 0x9097, 0x0c14, 0x00000000);
785 nv_mthd(priv, 0x9097, 0x0c24, 0x00000000);
786 nv_mthd(priv, 0x9097, 0x0c34, 0x00000000);
787 nv_mthd(priv, 0x9097, 0x0c44, 0x00000000);
788 nv_mthd(priv, 0x9097, 0x0c54, 0x00000000);
789 nv_mthd(priv, 0x9097, 0x0c64, 0x00000000);
790 nv_mthd(priv, 0x9097, 0x0c74, 0x00000000);
791 nv_mthd(priv, 0x9097, 0x0c84, 0x00000000);
792 nv_mthd(priv, 0x9097, 0x0c94, 0x00000000);
793 nv_mthd(priv, 0x9097, 0x0ca4, 0x00000000);
794 nv_mthd(priv, 0x9097, 0x0cb4, 0x00000000);
795 nv_mthd(priv, 0x9097, 0x0cc4, 0x00000000);
796 nv_mthd(priv, 0x9097, 0x0cd4, 0x00000000);
797 nv_mthd(priv, 0x9097, 0x0ce4, 0x00000000);
798 nv_mthd(priv, 0x9097, 0x0cf4, 0x00000000);
799 nv_mthd(priv, 0x9097, 0x0c08, 0x00000000);
800 nv_mthd(priv, 0x9097, 0x0c18, 0x00000000);
801 nv_mthd(priv, 0x9097, 0x0c28, 0x00000000);
802 nv_mthd(priv, 0x9097, 0x0c38, 0x00000000);
803 nv_mthd(priv, 0x9097, 0x0c48, 0x00000000);
804 nv_mthd(priv, 0x9097, 0x0c58, 0x00000000);
805 nv_mthd(priv, 0x9097, 0x0c68, 0x00000000);
806 nv_mthd(priv, 0x9097, 0x0c78, 0x00000000);
807 nv_mthd(priv, 0x9097, 0x0c88, 0x00000000);
808 nv_mthd(priv, 0x9097, 0x0c98, 0x00000000);
809 nv_mthd(priv, 0x9097, 0x0ca8, 0x00000000);
810 nv_mthd(priv, 0x9097, 0x0cb8, 0x00000000);
811 nv_mthd(priv, 0x9097, 0x0cc8, 0x00000000);
812 nv_mthd(priv, 0x9097, 0x0cd8, 0x00000000);
813 nv_mthd(priv, 0x9097, 0x0ce8, 0x00000000);
814 nv_mthd(priv, 0x9097, 0x0cf8, 0x00000000);
815 nv_mthd(priv, 0x9097, 0x0c0c, 0x3f800000);
816 nv_mthd(priv, 0x9097, 0x0c1c, 0x3f800000);
817 nv_mthd(priv, 0x9097, 0x0c2c, 0x3f800000);
818 nv_mthd(priv, 0x9097, 0x0c3c, 0x3f800000);
819 nv_mthd(priv, 0x9097, 0x0c4c, 0x3f800000);
820 nv_mthd(priv, 0x9097, 0x0c5c, 0x3f800000);
821 nv_mthd(priv, 0x9097, 0x0c6c, 0x3f800000);
822 nv_mthd(priv, 0x9097, 0x0c7c, 0x3f800000);
823 nv_mthd(priv, 0x9097, 0x0c8c, 0x3f800000);
824 nv_mthd(priv, 0x9097, 0x0c9c, 0x3f800000);
825 nv_mthd(priv, 0x9097, 0x0cac, 0x3f800000);
826 nv_mthd(priv, 0x9097, 0x0cbc, 0x3f800000);
827 nv_mthd(priv, 0x9097, 0x0ccc, 0x3f800000);
828 nv_mthd(priv, 0x9097, 0x0cdc, 0x3f800000);
829 nv_mthd(priv, 0x9097, 0x0cec, 0x3f800000);
830 nv_mthd(priv, 0x9097, 0x0cfc, 0x3f800000);
831 nv_mthd(priv, 0x9097, 0x0d00, 0xffff0000);
832 nv_mthd(priv, 0x9097, 0x0d08, 0xffff0000);
833 nv_mthd(priv, 0x9097, 0x0d10, 0xffff0000);
834 nv_mthd(priv, 0x9097, 0x0d18, 0xffff0000);
835 nv_mthd(priv, 0x9097, 0x0d20, 0xffff0000);
836 nv_mthd(priv, 0x9097, 0x0d28, 0xffff0000);
837 nv_mthd(priv, 0x9097, 0x0d30, 0xffff0000);
838 nv_mthd(priv, 0x9097, 0x0d38, 0xffff0000);
839 nv_mthd(priv, 0x9097, 0x0d04, 0xffff0000);
840 nv_mthd(priv, 0x9097, 0x0d0c, 0xffff0000);
841 nv_mthd(priv, 0x9097, 0x0d14, 0xffff0000);
842 nv_mthd(priv, 0x9097, 0x0d1c, 0xffff0000);
843 nv_mthd(priv, 0x9097, 0x0d24, 0xffff0000);
844 nv_mthd(priv, 0x9097, 0x0d2c, 0xffff0000);
845 nv_mthd(priv, 0x9097, 0x0d34, 0xffff0000);
846 nv_mthd(priv, 0x9097, 0x0d3c, 0xffff0000);
847 nv_mthd(priv, 0x9097, 0x0e00, 0x00000000);
848 nv_mthd(priv, 0x9097, 0x0e10, 0x00000000);
849 nv_mthd(priv, 0x9097, 0x0e20, 0x00000000);
850 nv_mthd(priv, 0x9097, 0x0e30, 0x00000000);
851 nv_mthd(priv, 0x9097, 0x0e40, 0x00000000);
852 nv_mthd(priv, 0x9097, 0x0e50, 0x00000000);
853 nv_mthd(priv, 0x9097, 0x0e60, 0x00000000);
854 nv_mthd(priv, 0x9097, 0x0e70, 0x00000000);
855 nv_mthd(priv, 0x9097, 0x0e80, 0x00000000);
856 nv_mthd(priv, 0x9097, 0x0e90, 0x00000000);
857 nv_mthd(priv, 0x9097, 0x0ea0, 0x00000000);
858 nv_mthd(priv, 0x9097, 0x0eb0, 0x00000000);
859 nv_mthd(priv, 0x9097, 0x0ec0, 0x00000000);
860 nv_mthd(priv, 0x9097, 0x0ed0, 0x00000000);
861 nv_mthd(priv, 0x9097, 0x0ee0, 0x00000000);
862 nv_mthd(priv, 0x9097, 0x0ef0, 0x00000000);
863 nv_mthd(priv, 0x9097, 0x0e04, 0xffff0000);
864 nv_mthd(priv, 0x9097, 0x0e14, 0xffff0000);
865 nv_mthd(priv, 0x9097, 0x0e24, 0xffff0000);
866 nv_mthd(priv, 0x9097, 0x0e34, 0xffff0000);
867 nv_mthd(priv, 0x9097, 0x0e44, 0xffff0000);
868 nv_mthd(priv, 0x9097, 0x0e54, 0xffff0000);
869 nv_mthd(priv, 0x9097, 0x0e64, 0xffff0000);
870 nv_mthd(priv, 0x9097, 0x0e74, 0xffff0000);
871 nv_mthd(priv, 0x9097, 0x0e84, 0xffff0000);
872 nv_mthd(priv, 0x9097, 0x0e94, 0xffff0000);
873 nv_mthd(priv, 0x9097, 0x0ea4, 0xffff0000);
874 nv_mthd(priv, 0x9097, 0x0eb4, 0xffff0000);
875 nv_mthd(priv, 0x9097, 0x0ec4, 0xffff0000);
876 nv_mthd(priv, 0x9097, 0x0ed4, 0xffff0000);
877 nv_mthd(priv, 0x9097, 0x0ee4, 0xffff0000);
878 nv_mthd(priv, 0x9097, 0x0ef4, 0xffff0000);
879 nv_mthd(priv, 0x9097, 0x0e08, 0xffff0000);
880 nv_mthd(priv, 0x9097, 0x0e18, 0xffff0000);
881 nv_mthd(priv, 0x9097, 0x0e28, 0xffff0000);
882 nv_mthd(priv, 0x9097, 0x0e38, 0xffff0000);
883 nv_mthd(priv, 0x9097, 0x0e48, 0xffff0000);
884 nv_mthd(priv, 0x9097, 0x0e58, 0xffff0000);
885 nv_mthd(priv, 0x9097, 0x0e68, 0xffff0000);
886 nv_mthd(priv, 0x9097, 0x0e78, 0xffff0000);
887 nv_mthd(priv, 0x9097, 0x0e88, 0xffff0000);
888 nv_mthd(priv, 0x9097, 0x0e98, 0xffff0000);
889 nv_mthd(priv, 0x9097, 0x0ea8, 0xffff0000);
890 nv_mthd(priv, 0x9097, 0x0eb8, 0xffff0000);
891 nv_mthd(priv, 0x9097, 0x0ec8, 0xffff0000);
892 nv_mthd(priv, 0x9097, 0x0ed8, 0xffff0000);
893 nv_mthd(priv, 0x9097, 0x0ee8, 0xffff0000);
894 nv_mthd(priv, 0x9097, 0x0ef8, 0xffff0000);
895 nv_mthd(priv, 0x9097, 0x0d40, 0x00000000);
896 nv_mthd(priv, 0x9097, 0x0d48, 0x00000000);
897 nv_mthd(priv, 0x9097, 0x0d50, 0x00000000);
898 nv_mthd(priv, 0x9097, 0x0d58, 0x00000000);
899 nv_mthd(priv, 0x9097, 0x0d44, 0x00000000);
900 nv_mthd(priv, 0x9097, 0x0d4c, 0x00000000);
901 nv_mthd(priv, 0x9097, 0x0d54, 0x00000000);
902 nv_mthd(priv, 0x9097, 0x0d5c, 0x00000000);
903 nv_mthd(priv, 0x9097, 0x1e00, 0x00000001);
904 nv_mthd(priv, 0x9097, 0x1e20, 0x00000001);
905 nv_mthd(priv, 0x9097, 0x1e40, 0x00000001);
906 nv_mthd(priv, 0x9097, 0x1e60, 0x00000001);
907 nv_mthd(priv, 0x9097, 0x1e80, 0x00000001);
908 nv_mthd(priv, 0x9097, 0x1ea0, 0x00000001);
909 nv_mthd(priv, 0x9097, 0x1ec0, 0x00000001);
910 nv_mthd(priv, 0x9097, 0x1ee0, 0x00000001);
911 nv_mthd(priv, 0x9097, 0x1e04, 0x00000001);
912 nv_mthd(priv, 0x9097, 0x1e24, 0x00000001);
913 nv_mthd(priv, 0x9097, 0x1e44, 0x00000001);
914 nv_mthd(priv, 0x9097, 0x1e64, 0x00000001);
915 nv_mthd(priv, 0x9097, 0x1e84, 0x00000001);
916 nv_mthd(priv, 0x9097, 0x1ea4, 0x00000001);
917 nv_mthd(priv, 0x9097, 0x1ec4, 0x00000001);
918 nv_mthd(priv, 0x9097, 0x1ee4, 0x00000001);
919 nv_mthd(priv, 0x9097, 0x1e08, 0x00000002);
920 nv_mthd(priv, 0x9097, 0x1e28, 0x00000002);
921 nv_mthd(priv, 0x9097, 0x1e48, 0x00000002);
922 nv_mthd(priv, 0x9097, 0x1e68, 0x00000002);
923 nv_mthd(priv, 0x9097, 0x1e88, 0x00000002);
924 nv_mthd(priv, 0x9097, 0x1ea8, 0x00000002);
925 nv_mthd(priv, 0x9097, 0x1ec8, 0x00000002);
926 nv_mthd(priv, 0x9097, 0x1ee8, 0x00000002);
927 nv_mthd(priv, 0x9097, 0x1e0c, 0x00000001);
928 nv_mthd(priv, 0x9097, 0x1e2c, 0x00000001);
929 nv_mthd(priv, 0x9097, 0x1e4c, 0x00000001);
930 nv_mthd(priv, 0x9097, 0x1e6c, 0x00000001);
931 nv_mthd(priv, 0x9097, 0x1e8c, 0x00000001);
932 nv_mthd(priv, 0x9097, 0x1eac, 0x00000001);
933 nv_mthd(priv, 0x9097, 0x1ecc, 0x00000001);
934 nv_mthd(priv, 0x9097, 0x1eec, 0x00000001);
935 nv_mthd(priv, 0x9097, 0x1e10, 0x00000001);
936 nv_mthd(priv, 0x9097, 0x1e30, 0x00000001);
937 nv_mthd(priv, 0x9097, 0x1e50, 0x00000001);
938 nv_mthd(priv, 0x9097, 0x1e70, 0x00000001);
939 nv_mthd(priv, 0x9097, 0x1e90, 0x00000001);
940 nv_mthd(priv, 0x9097, 0x1eb0, 0x00000001);
941 nv_mthd(priv, 0x9097, 0x1ed0, 0x00000001);
942 nv_mthd(priv, 0x9097, 0x1ef0, 0x00000001);
943 nv_mthd(priv, 0x9097, 0x1e14, 0x00000002);
944 nv_mthd(priv, 0x9097, 0x1e34, 0x00000002);
945 nv_mthd(priv, 0x9097, 0x1e54, 0x00000002);
946 nv_mthd(priv, 0x9097, 0x1e74, 0x00000002);
947 nv_mthd(priv, 0x9097, 0x1e94, 0x00000002);
948 nv_mthd(priv, 0x9097, 0x1eb4, 0x00000002);
949 nv_mthd(priv, 0x9097, 0x1ed4, 0x00000002);
950 nv_mthd(priv, 0x9097, 0x1ef4, 0x00000002);
951 nv_mthd(priv, 0x9097, 0x1e18, 0x00000001);
952 nv_mthd(priv, 0x9097, 0x1e38, 0x00000001);
953 nv_mthd(priv, 0x9097, 0x1e58, 0x00000001);
954 nv_mthd(priv, 0x9097, 0x1e78, 0x00000001);
955 nv_mthd(priv, 0x9097, 0x1e98, 0x00000001);
956 nv_mthd(priv, 0x9097, 0x1eb8, 0x00000001);
957 nv_mthd(priv, 0x9097, 0x1ed8, 0x00000001);
958 nv_mthd(priv, 0x9097, 0x1ef8, 0x00000001);
959 if (fermi == 0x9097) {
960 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
961 nv_mthd(priv, 0x9097, mthd, 0x00000000);
962 }
963 nv_mthd(priv, 0x9097, 0x030c, 0x00000001);
964 nv_mthd(priv, 0x9097, 0x1944, 0x00000000);
965 nv_mthd(priv, 0x9097, 0x1514, 0x00000000);
966 nv_mthd(priv, 0x9097, 0x0d68, 0x0000ffff);
967 nv_mthd(priv, 0x9097, 0x121c, 0x0fac6881);
968 nv_mthd(priv, 0x9097, 0x0fac, 0x00000001);
969 nv_mthd(priv, 0x9097, 0x1538, 0x00000001);
970 nv_mthd(priv, 0x9097, 0x0fe0, 0x00000000);
971 nv_mthd(priv, 0x9097, 0x0fe4, 0x00000000);
972 nv_mthd(priv, 0x9097, 0x0fe8, 0x00000014);
973 nv_mthd(priv, 0x9097, 0x0fec, 0x00000040);
974 nv_mthd(priv, 0x9097, 0x0ff0, 0x00000000);
975 nv_mthd(priv, 0x9097, 0x179c, 0x00000000);
976 nv_mthd(priv, 0x9097, 0x1228, 0x00000400);
977 nv_mthd(priv, 0x9097, 0x122c, 0x00000300);
978 nv_mthd(priv, 0x9097, 0x1230, 0x00010001);
979 nv_mthd(priv, 0x9097, 0x07f8, 0x00000000);
980 nv_mthd(priv, 0x9097, 0x15b4, 0x00000001);
981 nv_mthd(priv, 0x9097, 0x15cc, 0x00000000);
982 nv_mthd(priv, 0x9097, 0x1534, 0x00000000);
983 nv_mthd(priv, 0x9097, 0x0fb0, 0x00000000);
984 nv_mthd(priv, 0x9097, 0x15d0, 0x00000000);
985 nv_mthd(priv, 0x9097, 0x153c, 0x00000000);
986 nv_mthd(priv, 0x9097, 0x16b4, 0x00000003);
987 nv_mthd(priv, 0x9097, 0x0fbc, 0x0000ffff);
988 nv_mthd(priv, 0x9097, 0x0fc0, 0x0000ffff);
989 nv_mthd(priv, 0x9097, 0x0fc4, 0x0000ffff);
990 nv_mthd(priv, 0x9097, 0x0fc8, 0x0000ffff);
991 nv_mthd(priv, 0x9097, 0x0df8, 0x00000000);
992 nv_mthd(priv, 0x9097, 0x0dfc, 0x00000000);
993 nv_mthd(priv, 0x9097, 0x1948, 0x00000000);
994 nv_mthd(priv, 0x9097, 0x1970, 0x00000001);
995 nv_mthd(priv, 0x9097, 0x161c, 0x000009f0);
996 nv_mthd(priv, 0x9097, 0x0dcc, 0x00000010);
997 nv_mthd(priv, 0x9097, 0x163c, 0x00000000);
998 nv_mthd(priv, 0x9097, 0x15e4, 0x00000000);
999 nv_mthd(priv, 0x9097, 0x1160, 0x25e00040);
1000 nv_mthd(priv, 0x9097, 0x1164, 0x25e00040);
1001 nv_mthd(priv, 0x9097, 0x1168, 0x25e00040);
1002 nv_mthd(priv, 0x9097, 0x116c, 0x25e00040);
1003 nv_mthd(priv, 0x9097, 0x1170, 0x25e00040);
1004 nv_mthd(priv, 0x9097, 0x1174, 0x25e00040);
1005 nv_mthd(priv, 0x9097, 0x1178, 0x25e00040);
1006 nv_mthd(priv, 0x9097, 0x117c, 0x25e00040);
1007 nv_mthd(priv, 0x9097, 0x1180, 0x25e00040);
1008 nv_mthd(priv, 0x9097, 0x1184, 0x25e00040);
1009 nv_mthd(priv, 0x9097, 0x1188, 0x25e00040);
1010 nv_mthd(priv, 0x9097, 0x118c, 0x25e00040);
1011 nv_mthd(priv, 0x9097, 0x1190, 0x25e00040);
1012 nv_mthd(priv, 0x9097, 0x1194, 0x25e00040);
1013 nv_mthd(priv, 0x9097, 0x1198, 0x25e00040);
1014 nv_mthd(priv, 0x9097, 0x119c, 0x25e00040);
1015 nv_mthd(priv, 0x9097, 0x11a0, 0x25e00040);
1016 nv_mthd(priv, 0x9097, 0x11a4, 0x25e00040);
1017 nv_mthd(priv, 0x9097, 0x11a8, 0x25e00040);
1018 nv_mthd(priv, 0x9097, 0x11ac, 0x25e00040);
1019 nv_mthd(priv, 0x9097, 0x11b0, 0x25e00040);
1020 nv_mthd(priv, 0x9097, 0x11b4, 0x25e00040);
1021 nv_mthd(priv, 0x9097, 0x11b8, 0x25e00040);
1022 nv_mthd(priv, 0x9097, 0x11bc, 0x25e00040);
1023 nv_mthd(priv, 0x9097, 0x11c0, 0x25e00040);
1024 nv_mthd(priv, 0x9097, 0x11c4, 0x25e00040);
1025 nv_mthd(priv, 0x9097, 0x11c8, 0x25e00040);
1026 nv_mthd(priv, 0x9097, 0x11cc, 0x25e00040);
1027 nv_mthd(priv, 0x9097, 0x11d0, 0x25e00040);
1028 nv_mthd(priv, 0x9097, 0x11d4, 0x25e00040);
1029 nv_mthd(priv, 0x9097, 0x11d8, 0x25e00040);
1030 nv_mthd(priv, 0x9097, 0x11dc, 0x25e00040);
1031 nv_mthd(priv, 0x9097, 0x1880, 0x00000000);
1032 nv_mthd(priv, 0x9097, 0x1884, 0x00000000);
1033 nv_mthd(priv, 0x9097, 0x1888, 0x00000000);
1034 nv_mthd(priv, 0x9097, 0x188c, 0x00000000);
1035 nv_mthd(priv, 0x9097, 0x1890, 0x00000000);
1036 nv_mthd(priv, 0x9097, 0x1894, 0x00000000);
1037 nv_mthd(priv, 0x9097, 0x1898, 0x00000000);
1038 nv_mthd(priv, 0x9097, 0x189c, 0x00000000);
1039 nv_mthd(priv, 0x9097, 0x18a0, 0x00000000);
1040 nv_mthd(priv, 0x9097, 0x18a4, 0x00000000);
1041 nv_mthd(priv, 0x9097, 0x18a8, 0x00000000);
1042 nv_mthd(priv, 0x9097, 0x18ac, 0x00000000);
1043 nv_mthd(priv, 0x9097, 0x18b0, 0x00000000);
1044 nv_mthd(priv, 0x9097, 0x18b4, 0x00000000);
1045 nv_mthd(priv, 0x9097, 0x18b8, 0x00000000);
1046 nv_mthd(priv, 0x9097, 0x18bc, 0x00000000);
1047 nv_mthd(priv, 0x9097, 0x18c0, 0x00000000);
1048 nv_mthd(priv, 0x9097, 0x18c4, 0x00000000);
1049 nv_mthd(priv, 0x9097, 0x18c8, 0x00000000);
1050 nv_mthd(priv, 0x9097, 0x18cc, 0x00000000);
1051 nv_mthd(priv, 0x9097, 0x18d0, 0x00000000);
1052 nv_mthd(priv, 0x9097, 0x18d4, 0x00000000);
1053 nv_mthd(priv, 0x9097, 0x18d8, 0x00000000);
1054 nv_mthd(priv, 0x9097, 0x18dc, 0x00000000);
1055 nv_mthd(priv, 0x9097, 0x18e0, 0x00000000);
1056 nv_mthd(priv, 0x9097, 0x18e4, 0x00000000);
1057 nv_mthd(priv, 0x9097, 0x18e8, 0x00000000);
1058 nv_mthd(priv, 0x9097, 0x18ec, 0x00000000);
1059 nv_mthd(priv, 0x9097, 0x18f0, 0x00000000);
1060 nv_mthd(priv, 0x9097, 0x18f4, 0x00000000);
1061 nv_mthd(priv, 0x9097, 0x18f8, 0x00000000);
1062 nv_mthd(priv, 0x9097, 0x18fc, 0x00000000);
1063 nv_mthd(priv, 0x9097, 0x0f84, 0x00000000);
1064 nv_mthd(priv, 0x9097, 0x0f88, 0x00000000);
1065 nv_mthd(priv, 0x9097, 0x17c8, 0x00000000);
1066 nv_mthd(priv, 0x9097, 0x17cc, 0x00000000);
1067 nv_mthd(priv, 0x9097, 0x17d0, 0x000000ff);
1068 nv_mthd(priv, 0x9097, 0x17d4, 0xffffffff);
1069 nv_mthd(priv, 0x9097, 0x17d8, 0x00000002);
1070 nv_mthd(priv, 0x9097, 0x17dc, 0x00000000);
1071 nv_mthd(priv, 0x9097, 0x15f4, 0x00000000);
1072 nv_mthd(priv, 0x9097, 0x15f8, 0x00000000);
1073 nv_mthd(priv, 0x9097, 0x1434, 0x00000000);
1074 nv_mthd(priv, 0x9097, 0x1438, 0x00000000);
1075 nv_mthd(priv, 0x9097, 0x0d74, 0x00000000);
1076 nv_mthd(priv, 0x9097, 0x0dec, 0x00000001);
1077 nv_mthd(priv, 0x9097, 0x13a4, 0x00000000);
1078 nv_mthd(priv, 0x9097, 0x1318, 0x00000001);
1079 nv_mthd(priv, 0x9097, 0x1644, 0x00000000);
1080 nv_mthd(priv, 0x9097, 0x0748, 0x00000000);
1081 nv_mthd(priv, 0x9097, 0x0de8, 0x00000000);
1082 nv_mthd(priv, 0x9097, 0x1648, 0x00000000);
1083 nv_mthd(priv, 0x9097, 0x12a4, 0x00000000);
1084 nv_mthd(priv, 0x9097, 0x1120, 0x00000000);
1085 nv_mthd(priv, 0x9097, 0x1124, 0x00000000);
1086 nv_mthd(priv, 0x9097, 0x1128, 0x00000000);
1087 nv_mthd(priv, 0x9097, 0x112c, 0x00000000);
1088 nv_mthd(priv, 0x9097, 0x1118, 0x00000000);
1089 nv_mthd(priv, 0x9097, 0x164c, 0x00000000);
1090 nv_mthd(priv, 0x9097, 0x1658, 0x00000000);
1091 nv_mthd(priv, 0x9097, 0x1910, 0x00000290);
1092 nv_mthd(priv, 0x9097, 0x1518, 0x00000000);
1093 nv_mthd(priv, 0x9097, 0x165c, 0x00000001);
1094 nv_mthd(priv, 0x9097, 0x1520, 0x00000000);
1095 nv_mthd(priv, 0x9097, 0x1604, 0x00000000);
1096 nv_mthd(priv, 0x9097, 0x1570, 0x00000000);
1097 nv_mthd(priv, 0x9097, 0x13b0, 0x3f800000);
1098 nv_mthd(priv, 0x9097, 0x13b4, 0x3f800000);
1099 nv_mthd(priv, 0x9097, 0x020c, 0x00000000);
1100 nv_mthd(priv, 0x9097, 0x1670, 0x30201000);
1101 nv_mthd(priv, 0x9097, 0x1674, 0x70605040);
1102 nv_mthd(priv, 0x9097, 0x1678, 0xb8a89888);
1103 nv_mthd(priv, 0x9097, 0x167c, 0xf8e8d8c8);
1104 nv_mthd(priv, 0x9097, 0x166c, 0x00000000);
1105 nv_mthd(priv, 0x9097, 0x1680, 0x00ffff00);
1106 nv_mthd(priv, 0x9097, 0x12d0, 0x00000003);
1107 nv_mthd(priv, 0x9097, 0x12d4, 0x00000002);
1108 nv_mthd(priv, 0x9097, 0x1684, 0x00000000);
1109 nv_mthd(priv, 0x9097, 0x1688, 0x00000000);
1110 nv_mthd(priv, 0x9097, 0x0dac, 0x00001b02);
1111 nv_mthd(priv, 0x9097, 0x0db0, 0x00001b02);
1112 nv_mthd(priv, 0x9097, 0x0db4, 0x00000000);
1113 nv_mthd(priv, 0x9097, 0x168c, 0x00000000);
1114 nv_mthd(priv, 0x9097, 0x15bc, 0x00000000);
1115 nv_mthd(priv, 0x9097, 0x156c, 0x00000000);
1116 nv_mthd(priv, 0x9097, 0x187c, 0x00000000);
1117 nv_mthd(priv, 0x9097, 0x1110, 0x00000001);
1118 nv_mthd(priv, 0x9097, 0x0dc0, 0x00000000);
1119 nv_mthd(priv, 0x9097, 0x0dc4, 0x00000000);
1120 nv_mthd(priv, 0x9097, 0x0dc8, 0x00000000);
1121 nv_mthd(priv, 0x9097, 0x1234, 0x00000000);
1122 nv_mthd(priv, 0x9097, 0x1690, 0x00000000);
1123 nv_mthd(priv, 0x9097, 0x12ac, 0x00000001);
1124 nv_mthd(priv, 0x9097, 0x02c4, 0x00000000);
1125 nv_mthd(priv, 0x9097, 0x0790, 0x00000000);
1126 nv_mthd(priv, 0x9097, 0x0794, 0x00000000);
1127 nv_mthd(priv, 0x9097, 0x0798, 0x00000000);
1128 nv_mthd(priv, 0x9097, 0x079c, 0x00000000);
1129 nv_mthd(priv, 0x9097, 0x07a0, 0x00000000);
1130 nv_mthd(priv, 0x9097, 0x077c, 0x00000000);
1131 nv_mthd(priv, 0x9097, 0x1000, 0x00000010);
1132 nv_mthd(priv, 0x9097, 0x10fc, 0x00000000);
1133 nv_mthd(priv, 0x9097, 0x1290, 0x00000000);
1134 nv_mthd(priv, 0x9097, 0x0218, 0x00000010);
1135 nv_mthd(priv, 0x9097, 0x12d8, 0x00000000);
1136 nv_mthd(priv, 0x9097, 0x12dc, 0x00000010);
1137 nv_mthd(priv, 0x9097, 0x0d94, 0x00000001);
1138 nv_mthd(priv, 0x9097, 0x155c, 0x00000000);
1139 nv_mthd(priv, 0x9097, 0x1560, 0x00000000);
1140 nv_mthd(priv, 0x9097, 0x1564, 0x00001fff);
1141 nv_mthd(priv, 0x9097, 0x1574, 0x00000000);
1142 nv_mthd(priv, 0x9097, 0x1578, 0x00000000);
1143 nv_mthd(priv, 0x9097, 0x157c, 0x003fffff);
1144 nv_mthd(priv, 0x9097, 0x1354, 0x00000000);
1145 nv_mthd(priv, 0x9097, 0x1664, 0x00000000);
1146 nv_mthd(priv, 0x9097, 0x1610, 0x00000012);
1147 nv_mthd(priv, 0x9097, 0x1608, 0x00000000);
1148 nv_mthd(priv, 0x9097, 0x160c, 0x00000000);
1149 nv_mthd(priv, 0x9097, 0x162c, 0x00000003);
1150 nv_mthd(priv, 0x9097, 0x0210, 0x00000000);
1151 nv_mthd(priv, 0x9097, 0x0320, 0x00000000);
1152 nv_mthd(priv, 0x9097, 0x0324, 0x3f800000);
1153 nv_mthd(priv, 0x9097, 0x0328, 0x3f800000);
1154 nv_mthd(priv, 0x9097, 0x032c, 0x3f800000);
1155 nv_mthd(priv, 0x9097, 0x0330, 0x3f800000);
1156 nv_mthd(priv, 0x9097, 0x0334, 0x3f800000);
1157 nv_mthd(priv, 0x9097, 0x0338, 0x3f800000);
1158 nv_mthd(priv, 0x9097, 0x0750, 0x00000000);
1159 nv_mthd(priv, 0x9097, 0x0760, 0x39291909);
1160 nv_mthd(priv, 0x9097, 0x0764, 0x79695949);
1161 nv_mthd(priv, 0x9097, 0x0768, 0xb9a99989);
1162 nv_mthd(priv, 0x9097, 0x076c, 0xf9e9d9c9);
1163 nv_mthd(priv, 0x9097, 0x0770, 0x30201000);
1164 nv_mthd(priv, 0x9097, 0x0774, 0x70605040);
1165 nv_mthd(priv, 0x9097, 0x0778, 0x00009080);
1166 nv_mthd(priv, 0x9097, 0x0780, 0x39291909);
1167 nv_mthd(priv, 0x9097, 0x0784, 0x79695949);
1168 nv_mthd(priv, 0x9097, 0x0788, 0xb9a99989);
1169 nv_mthd(priv, 0x9097, 0x078c, 0xf9e9d9c9);
1170 nv_mthd(priv, 0x9097, 0x07d0, 0x30201000);
1171 nv_mthd(priv, 0x9097, 0x07d4, 0x70605040);
1172 nv_mthd(priv, 0x9097, 0x07d8, 0x00009080);
1173 nv_mthd(priv, 0x9097, 0x037c, 0x00000001);
1174 nv_mthd(priv, 0x9097, 0x0740, 0x00000000);
1175 nv_mthd(priv, 0x9097, 0x0744, 0x00000000);
1176 nv_mthd(priv, 0x9097, 0x2600, 0x00000000);
1177 nv_mthd(priv, 0x9097, 0x1918, 0x00000000);
1178 nv_mthd(priv, 0x9097, 0x191c, 0x00000900);
1179 nv_mthd(priv, 0x9097, 0x1920, 0x00000405);
1180 nv_mthd(priv, 0x9097, 0x1308, 0x00000001);
1181 nv_mthd(priv, 0x9097, 0x1924, 0x00000000);
1182 nv_mthd(priv, 0x9097, 0x13ac, 0x00000000);
1183 nv_mthd(priv, 0x9097, 0x192c, 0x00000001);
1184 nv_mthd(priv, 0x9097, 0x193c, 0x00002c1c);
1185 nv_mthd(priv, 0x9097, 0x0d7c, 0x00000000);
1186 nv_mthd(priv, 0x9097, 0x0f8c, 0x00000000);
1187 nv_mthd(priv, 0x9097, 0x02c0, 0x00000001);
1188 nv_mthd(priv, 0x9097, 0x1510, 0x00000000);
1189 nv_mthd(priv, 0x9097, 0x1940, 0x00000000);
1190 nv_mthd(priv, 0x9097, 0x0ff4, 0x00000000);
1191 nv_mthd(priv, 0x9097, 0x0ff8, 0x00000000);
1192 nv_mthd(priv, 0x9097, 0x194c, 0x00000000);
1193 nv_mthd(priv, 0x9097, 0x1950, 0x00000000);
1194 nv_mthd(priv, 0x9097, 0x1968, 0x00000000);
1195 nv_mthd(priv, 0x9097, 0x1590, 0x0000003f);
1196 nv_mthd(priv, 0x9097, 0x07e8, 0x00000000);
1197 nv_mthd(priv, 0x9097, 0x07ec, 0x00000000);
1198 nv_mthd(priv, 0x9097, 0x07f0, 0x00000000);
1199 nv_mthd(priv, 0x9097, 0x07f4, 0x00000000);
1200 nv_mthd(priv, 0x9097, 0x196c, 0x00000011);
1201 nv_mthd(priv, 0x9097, 0x197c, 0x00000000);
1202 nv_mthd(priv, 0x9097, 0x0fcc, 0x00000000);
1203 nv_mthd(priv, 0x9097, 0x0fd0, 0x00000000);
1204 nv_mthd(priv, 0x9097, 0x02d8, 0x00000040);
1205 nv_mthd(priv, 0x9097, 0x1980, 0x00000080);
1206 nv_mthd(priv, 0x9097, 0x1504, 0x00000080);
1207 nv_mthd(priv, 0x9097, 0x1984, 0x00000000);
1208 nv_mthd(priv, 0x9097, 0x0300, 0x00000001);
1209 nv_mthd(priv, 0x9097, 0x13a8, 0x00000000);
1210 nv_mthd(priv, 0x9097, 0x12ec, 0x00000000);
1211 nv_mthd(priv, 0x9097, 0x1310, 0x00000000);
1212 nv_mthd(priv, 0x9097, 0x1314, 0x00000001);
1213 nv_mthd(priv, 0x9097, 0x1380, 0x00000000);
1214 nv_mthd(priv, 0x9097, 0x1384, 0x00000001);
1215 nv_mthd(priv, 0x9097, 0x1388, 0x00000001);
1216 nv_mthd(priv, 0x9097, 0x138c, 0x00000001);
1217 nv_mthd(priv, 0x9097, 0x1390, 0x00000001);
1218 nv_mthd(priv, 0x9097, 0x1394, 0x00000000);
1219 nv_mthd(priv, 0x9097, 0x139c, 0x00000000);
1220 nv_mthd(priv, 0x9097, 0x1398, 0x00000000);
1221 nv_mthd(priv, 0x9097, 0x1594, 0x00000000);
1222 nv_mthd(priv, 0x9097, 0x1598, 0x00000001);
1223 nv_mthd(priv, 0x9097, 0x159c, 0x00000001);
1224 nv_mthd(priv, 0x9097, 0x15a0, 0x00000001);
1225 nv_mthd(priv, 0x9097, 0x15a4, 0x00000001);
1226 nv_mthd(priv, 0x9097, 0x0f54, 0x00000000);
1227 nv_mthd(priv, 0x9097, 0x0f58, 0x00000000);
1228 nv_mthd(priv, 0x9097, 0x0f5c, 0x00000000);
1229 nv_mthd(priv, 0x9097, 0x19bc, 0x00000000);
1230 nv_mthd(priv, 0x9097, 0x0f9c, 0x00000000);
1231 nv_mthd(priv, 0x9097, 0x0fa0, 0x00000000);
1232 nv_mthd(priv, 0x9097, 0x12cc, 0x00000000);
1233 nv_mthd(priv, 0x9097, 0x12e8, 0x00000000);
1234 nv_mthd(priv, 0x9097, 0x130c, 0x00000001);
1235 nv_mthd(priv, 0x9097, 0x1360, 0x00000000);
1236 nv_mthd(priv, 0x9097, 0x1364, 0x00000000);
1237 nv_mthd(priv, 0x9097, 0x1368, 0x00000000);
1238 nv_mthd(priv, 0x9097, 0x136c, 0x00000000);
1239 nv_mthd(priv, 0x9097, 0x1370, 0x00000000);
1240 nv_mthd(priv, 0x9097, 0x1374, 0x00000000);
1241 nv_mthd(priv, 0x9097, 0x1378, 0x00000000);
1242 nv_mthd(priv, 0x9097, 0x137c, 0x00000000);
1243 nv_mthd(priv, 0x9097, 0x133c, 0x00000001);
1244 nv_mthd(priv, 0x9097, 0x1340, 0x00000001);
1245 nv_mthd(priv, 0x9097, 0x1344, 0x00000002);
1246 nv_mthd(priv, 0x9097, 0x1348, 0x00000001);
1247 nv_mthd(priv, 0x9097, 0x134c, 0x00000001);
1248 nv_mthd(priv, 0x9097, 0x1350, 0x00000002);
1249 nv_mthd(priv, 0x9097, 0x1358, 0x00000001);
1250 nv_mthd(priv, 0x9097, 0x12e4, 0x00000000);
1251 nv_mthd(priv, 0x9097, 0x131c, 0x00000000);
1252 nv_mthd(priv, 0x9097, 0x1320, 0x00000000);
1253 nv_mthd(priv, 0x9097, 0x1324, 0x00000000);
1254 nv_mthd(priv, 0x9097, 0x1328, 0x00000000);
1255 nv_mthd(priv, 0x9097, 0x19c0, 0x00000000);
1256 nv_mthd(priv, 0x9097, 0x1140, 0x00000000);
1257 nv_mthd(priv, 0x9097, 0x19c4, 0x00000000);
1258 nv_mthd(priv, 0x9097, 0x19c8, 0x00001500);
1259 nv_mthd(priv, 0x9097, 0x135c, 0x00000000);
1260 nv_mthd(priv, 0x9097, 0x0f90, 0x00000000);
1261 nv_mthd(priv, 0x9097, 0x19e0, 0x00000001);
1262 nv_mthd(priv, 0x9097, 0x19e4, 0x00000001);
1263 nv_mthd(priv, 0x9097, 0x19e8, 0x00000001);
1264 nv_mthd(priv, 0x9097, 0x19ec, 0x00000001);
1265 nv_mthd(priv, 0x9097, 0x19f0, 0x00000001);
1266 nv_mthd(priv, 0x9097, 0x19f4, 0x00000001);
1267 nv_mthd(priv, 0x9097, 0x19f8, 0x00000001);
1268 nv_mthd(priv, 0x9097, 0x19fc, 0x00000001);
1269 nv_mthd(priv, 0x9097, 0x19cc, 0x00000001);
1270 nv_mthd(priv, 0x9097, 0x15b8, 0x00000000);
1271 nv_mthd(priv, 0x9097, 0x1a00, 0x00001111);
1272 nv_mthd(priv, 0x9097, 0x1a04, 0x00000000);
1273 nv_mthd(priv, 0x9097, 0x1a08, 0x00000000);
1274 nv_mthd(priv, 0x9097, 0x1a0c, 0x00000000);
1275 nv_mthd(priv, 0x9097, 0x1a10, 0x00000000);
1276 nv_mthd(priv, 0x9097, 0x1a14, 0x00000000);
1277 nv_mthd(priv, 0x9097, 0x1a18, 0x00000000);
1278 nv_mthd(priv, 0x9097, 0x1a1c, 0x00000000);
1279 nv_mthd(priv, 0x9097, 0x0d6c, 0xffff0000);
1280 nv_mthd(priv, 0x9097, 0x0d70, 0xffff0000);
1281 nv_mthd(priv, 0x9097, 0x10f8, 0x00001010);
1282 nv_mthd(priv, 0x9097, 0x0d80, 0x00000000);
1283 nv_mthd(priv, 0x9097, 0x0d84, 0x00000000);
1284 nv_mthd(priv, 0x9097, 0x0d88, 0x00000000);
1285 nv_mthd(priv, 0x9097, 0x0d8c, 0x00000000);
1286 nv_mthd(priv, 0x9097, 0x0d90, 0x00000000);
1287 nv_mthd(priv, 0x9097, 0x0da0, 0x00000000);
1288 nv_mthd(priv, 0x9097, 0x1508, 0x80000000);
1289 nv_mthd(priv, 0x9097, 0x150c, 0x40000000);
1290 nv_mthd(priv, 0x9097, 0x1668, 0x00000000);
1291 nv_mthd(priv, 0x9097, 0x0318, 0x00000008);
1292 nv_mthd(priv, 0x9097, 0x031c, 0x00000008);
1293 nv_mthd(priv, 0x9097, 0x0d9c, 0x00000001);
1294 nv_mthd(priv, 0x9097, 0x07dc, 0x00000000);
1295 nv_mthd(priv, 0x9097, 0x074c, 0x00000055);
1296 nv_mthd(priv, 0x9097, 0x1420, 0x00000003);
1297 nv_mthd(priv, 0x9097, 0x17bc, 0x00000000);
1298 nv_mthd(priv, 0x9097, 0x17c0, 0x00000000);
1299 nv_mthd(priv, 0x9097, 0x17c4, 0x00000001);
1300 nv_mthd(priv, 0x9097, 0x1008, 0x00000008);
1301 nv_mthd(priv, 0x9097, 0x100c, 0x00000040);
1302 nv_mthd(priv, 0x9097, 0x1010, 0x0000012c);
1303 nv_mthd(priv, 0x9097, 0x0d60, 0x00000040);
1304 nv_mthd(priv, 0x9097, 0x075c, 0x00000003);
1305 nv_mthd(priv, 0x9097, 0x1018, 0x00000020);
1306 nv_mthd(priv, 0x9097, 0x101c, 0x00000001);
1307 nv_mthd(priv, 0x9097, 0x1020, 0x00000020);
1308 nv_mthd(priv, 0x9097, 0x1024, 0x00000001);
1309 nv_mthd(priv, 0x9097, 0x1444, 0x00000000);
1310 nv_mthd(priv, 0x9097, 0x1448, 0x00000000);
1311 nv_mthd(priv, 0x9097, 0x144c, 0x00000000);
1312 nv_mthd(priv, 0x9097, 0x0360, 0x20164010);
1313 nv_mthd(priv, 0x9097, 0x0364, 0x00000020);
1314 nv_mthd(priv, 0x9097, 0x0368, 0x00000000);
1315 nv_mthd(priv, 0x9097, 0x0de4, 0x00000000);
1316 nv_mthd(priv, 0x9097, 0x0204, 0x00000006);
1317 nv_mthd(priv, 0x9097, 0x0208, 0x00000000);
1318 nv_mthd(priv, 0x9097, 0x02cc, 0x003fffff);
1319 nv_mthd(priv, 0x9097, 0x02d0, 0x00000c48);
1320 nv_mthd(priv, 0x9097, 0x1220, 0x00000005);
1321 nv_mthd(priv, 0x9097, 0x0fdc, 0x00000000);
1322 nv_mthd(priv, 0x9097, 0x0f98, 0x00300008);
1323 nv_mthd(priv, 0x9097, 0x1284, 0x04000080);
1324 nv_mthd(priv, 0x9097, 0x1450, 0x00300008);
1325 nv_mthd(priv, 0x9097, 0x1454, 0x04000080);
1326 nv_mthd(priv, 0x9097, 0x0214, 0x00000000);
1327	/* in the captured state trace this write appears right after the
	 * 0x90c0 methods, not here */
1328 nv_mthd(priv, 0x9097, 0x3410, 0x80002006);
1329}
1330
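/*
 * Default state for the 0x9197 object, the updated revision of the Fermi
 * 3D class found on later GF10x parts.  When the chip actually exposes
 * 9197 natively, its extra 0x3400-0x35fc method range is zeroed first.
 */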
1331static void
1332nvc0_grctx_generate_9197(struct nvc0_graph_priv *priv)
1333{
1334 u32 fermi = nvc0_graph_class(priv);
1335 u32 mthd;
1336
1337 if (fermi == 0x9197) {
1338 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
1339 nv_mthd(priv, 0x9197, mthd, 0x00000000);
1340 }
1341 nv_mthd(priv, 0x9197, 0x02e4, 0x0000b001);
1342}
1343
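/*
 * Default state for the 0x9297 object, the GF110-era revision of the
 * Fermi 3D class; same zeroing of the extra method range as 9197.
 */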
1344static void
1345nvc0_grctx_generate_9297(struct nvc0_graph_priv *priv)
1346{
1347 u32 fermi = nvc0_graph_class(priv);
1348 u32 mthd;
1349
1350 if (fermi == 0x9297) {
1351 for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
1352 nv_mthd(priv, 0x9297, mthd, 0x00000000);
1353 }
1354 nv_mthd(priv, 0x9297, 0x036c, 0x00000000);
1355 nv_mthd(priv, 0x9297, 0x0370, 0x00000000);
1356 nv_mthd(priv, 0x9297, 0x07a4, 0x00000000);
1357 nv_mthd(priv, 0x9297, 0x07a8, 0x00000000);
1358 nv_mthd(priv, 0x9297, 0x0374, 0x00000000);
1359 nv_mthd(priv, 0x9297, 0x0378, 0x00000020);
1360}
1361
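/* Default state for the 0x902d 2D class. */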
1362static void
1363nvc0_grctx_generate_902d(struct nvc0_graph_priv *priv)
1364{
1365 nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
1366 nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
1367 nv_mthd(priv, 0x902d, 0x0208, 0x00000020);
1368 nv_mthd(priv, 0x902d, 0x020c, 0x00000001);
1369 nv_mthd(priv, 0x902d, 0x0210, 0x00000000);
1370 nv_mthd(priv, 0x902d, 0x0214, 0x00000080);
1371 nv_mthd(priv, 0x902d, 0x0218, 0x00000100);
1372 nv_mthd(priv, 0x902d, 0x021c, 0x00000100);
1373 nv_mthd(priv, 0x902d, 0x0220, 0x00000000);
1374 nv_mthd(priv, 0x902d, 0x0224, 0x00000000);
1375 nv_mthd(priv, 0x902d, 0x0230, 0x000000cf);
1376 nv_mthd(priv, 0x902d, 0x0234, 0x00000001);
1377 nv_mthd(priv, 0x902d, 0x0238, 0x00000020);
1378 nv_mthd(priv, 0x902d, 0x023c, 0x00000001);
1379 nv_mthd(priv, 0x902d, 0x0244, 0x00000080);
1380 nv_mthd(priv, 0x902d, 0x0248, 0x00000100);
1381 nv_mthd(priv, 0x902d, 0x024c, 0x00000100);
1382}
1383
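/* Default state for the 0x9039 M2MF (memory-to-memory format) class. */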
1384static void
1385nvc0_grctx_generate_9039(struct nvc0_graph_priv *priv)
1386{
1387 nv_mthd(priv, 0x9039, 0x030c, 0x00000000);
1388 nv_mthd(priv, 0x9039, 0x0310, 0x00000000);
1389 nv_mthd(priv, 0x9039, 0x0314, 0x00000000);
1390 nv_mthd(priv, 0x9039, 0x0320, 0x00000000);
1391 nv_mthd(priv, 0x9039, 0x0238, 0x00000000);
1392 nv_mthd(priv, 0x9039, 0x023c, 0x00000000);
1393 nv_mthd(priv, 0x9039, 0x0318, 0x00000000);
1394 nv_mthd(priv, 0x9039, 0x031c, 0x00000000);
1395}
1396
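/*
 * Default state for the 0x90c0 compute class.  The 0x27xx method banks
 * appear to be wider on GF119 (chipset 0xd9), hence the 0xd9-only loops.
 */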
1397static void
1398nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
1399{
1400 int i;
1401
1402 for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
1403 nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
1404 nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
1405 nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
1406 nv_mthd(priv, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
1407 nv_mthd(priv, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
1408 nv_mthd(priv, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
1409 }
1410 nv_mthd(priv, 0x90c0, 0x270c, 0x00000000);
1411 nv_mthd(priv, 0x90c0, 0x272c, 0x00000000);
1412 nv_mthd(priv, 0x90c0, 0x274c, 0x00000000);
1413 nv_mthd(priv, 0x90c0, 0x276c, 0x00000000);
1414 nv_mthd(priv, 0x90c0, 0x278c, 0x00000000);
1415 nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000);
1416 nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000);
1417 nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000);
1418 for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
1419 nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
1420 nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
1421 nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
1422 nv_mthd(priv, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
1423 }
1424 nv_mthd(priv, 0x90c0, 0x030c, 0x00000001);
1425 nv_mthd(priv, 0x90c0, 0x1944, 0x00000000);
1426 nv_mthd(priv, 0x90c0, 0x0758, 0x00000100);
1427 nv_mthd(priv, 0x90c0, 0x02c4, 0x00000000);
1428 nv_mthd(priv, 0x90c0, 0x0790, 0x00000000);
1429 nv_mthd(priv, 0x90c0, 0x0794, 0x00000000);
1430 nv_mthd(priv, 0x90c0, 0x0798, 0x00000000);
1431 nv_mthd(priv, 0x90c0, 0x079c, 0x00000000);
1432 nv_mthd(priv, 0x90c0, 0x07a0, 0x00000000);
1433 nv_mthd(priv, 0x90c0, 0x077c, 0x00000000);
1434 nv_mthd(priv, 0x90c0, 0x0204, 0x00000000);
1435 nv_mthd(priv, 0x90c0, 0x0208, 0x00000000);
1436 nv_mthd(priv, 0x90c0, 0x020c, 0x00000000);
1437 nv_mthd(priv, 0x90c0, 0x0214, 0x00000000);
1438 nv_mthd(priv, 0x90c0, 0x024c, 0x00000000);
1439 nv_mthd(priv, 0x90c0, 0x0d94, 0x00000001);
1440 nv_mthd(priv, 0x90c0, 0x1608, 0x00000000);
1441 nv_mthd(priv, 0x90c0, 0x160c, 0x00000000);
1442 nv_mthd(priv, 0x90c0, 0x1664, 0x00000000);
1443}
1444
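/* Reset the DISPATCH unit (0x4040xx) to its power-on defaults. */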
1445static void
1446nvc0_grctx_generate_dispatch(struct nvc0_graph_priv *priv)
1447{
1448 int i;
1449
1450 nv_wr32(priv, 0x404004, 0x00000000);
1451 nv_wr32(priv, 0x404008, 0x00000000);
1452 nv_wr32(priv, 0x40400c, 0x00000000);
1453 nv_wr32(priv, 0x404010, 0x00000000);
1454 nv_wr32(priv, 0x404014, 0x00000000);
1455 nv_wr32(priv, 0x404018, 0x00000000);
1456 nv_wr32(priv, 0x40401c, 0x00000000);
1457 nv_wr32(priv, 0x404020, 0x00000000);
1458 nv_wr32(priv, 0x404024, 0x00000000);
1459 nv_wr32(priv, 0x404028, 0x00000000);
1460 nv_wr32(priv, 0x40402c, 0x00000000);
1461 nv_wr32(priv, 0x404044, 0x00000000);
1462 nv_wr32(priv, 0x404094, 0x00000000);
1463 nv_wr32(priv, 0x404098, 0x00000000);
1464 nv_wr32(priv, 0x40409c, 0x00000000);
1465 nv_wr32(priv, 0x4040a0, 0x00000000);
1466 nv_wr32(priv, 0x4040a4, 0x00000000);
1467 nv_wr32(priv, 0x4040a8, 0x00000000);
1468 nv_wr32(priv, 0x4040ac, 0x00000000);
1469 nv_wr32(priv, 0x4040b0, 0x00000000);
1470 nv_wr32(priv, 0x4040b4, 0x00000000);
1471 nv_wr32(priv, 0x4040b8, 0x00000000);
1472 nv_wr32(priv, 0x4040bc, 0x00000000);
1473 nv_wr32(priv, 0x4040c0, 0x00000000);
1474 nv_wr32(priv, 0x4040c4, 0x00000000);
1475 nv_wr32(priv, 0x4040c8, 0xf0000087);
1476 nv_wr32(priv, 0x4040d4, 0x00000000);
1477 nv_wr32(priv, 0x4040d8, 0x00000000);
1478 nv_wr32(priv, 0x4040dc, 0x00000000);
1479 nv_wr32(priv, 0x4040e0, 0x00000000);
1480 nv_wr32(priv, 0x4040e4, 0x00000000);
1481 nv_wr32(priv, 0x4040e8, 0x00001000);
1482 nv_wr32(priv, 0x4040f8, 0x00000000);
1483 nv_wr32(priv, 0x404130, 0x00000000);
1484 nv_wr32(priv, 0x404134, 0x00000000);
1485 nv_wr32(priv, 0x404138, 0x20000040);
1486 nv_wr32(priv, 0x404150, 0x0000002e);
1487 nv_wr32(priv, 0x404154, 0x00000400);
1488 nv_wr32(priv, 0x404158, 0x00000200);
1489 nv_wr32(priv, 0x404164, 0x00000055);
1490 nv_wr32(priv, 0x404168, 0x00000000);
1491 nv_wr32(priv, 0x404174, 0x00000000);
1492 nv_wr32(priv, 0x404178, 0x00000000);
1493 nv_wr32(priv, 0x40417c, 0x00000000);
1494 for (i = 0; i < 8; i++)
1495 nv_wr32(priv, 0x404200 + (i * 4), 0x00000000); /* subc */
1496}
1497
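/* Macro method processor (0x4044xx) defaults. */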
1498static void
1499nvc0_grctx_generate_macro(struct nvc0_graph_priv *priv)
1500{
1501 nv_wr32(priv, 0x404404, 0x00000000);
1502 nv_wr32(priv, 0x404408, 0x00000000);
1503 nv_wr32(priv, 0x40440c, 0x00000000);
1504 nv_wr32(priv, 0x404410, 0x00000000);
1505 nv_wr32(priv, 0x404414, 0x00000000);
1506 nv_wr32(priv, 0x404418, 0x00000000);
1507 nv_wr32(priv, 0x40441c, 0x00000000);
1508 nv_wr32(priv, 0x404420, 0x00000000);
1509 nv_wr32(priv, 0x404424, 0x00000000);
1510 nv_wr32(priv, 0x404428, 0x00000000);
1511 nv_wr32(priv, 0x40442c, 0x00000000);
1512 nv_wr32(priv, 0x404430, 0x00000000);
1513 nv_wr32(priv, 0x404434, 0x00000000);
1514 nv_wr32(priv, 0x404438, 0x00000000);
1515 nv_wr32(priv, 0x404460, 0x00000000);
1516 nv_wr32(priv, 0x404464, 0x00000000);
1517 nv_wr32(priv, 0x404468, 0x00ffffff);
1518 nv_wr32(priv, 0x40446c, 0x00000000);
1519 nv_wr32(priv, 0x404480, 0x00000001);
1520 nv_wr32(priv, 0x404498, 0x00000001);
1521}
1522
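/* M2MF engine (0x4046xx) defaults. */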
1523static void
1524nvc0_grctx_generate_m2mf(struct nvc0_graph_priv *priv)
1525{
1526 nv_wr32(priv, 0x404604, 0x00000015);
1527 nv_wr32(priv, 0x404608, 0x00000000);
1528 nv_wr32(priv, 0x40460c, 0x00002e00);
1529 nv_wr32(priv, 0x404610, 0x00000100);
1530 nv_wr32(priv, 0x404618, 0x00000000);
1531 nv_wr32(priv, 0x40461c, 0x00000000);
1532 nv_wr32(priv, 0x404620, 0x00000000);
1533 nv_wr32(priv, 0x404624, 0x00000000);
1534 nv_wr32(priv, 0x404628, 0x00000000);
1535 nv_wr32(priv, 0x40462c, 0x00000000);
1536 nv_wr32(priv, 0x404630, 0x00000000);
1537 nv_wr32(priv, 0x404634, 0x00000000);
1538 nv_wr32(priv, 0x404638, 0x00000004);
1539 nv_wr32(priv, 0x40463c, 0x00000000);
1540 nv_wr32(priv, 0x404640, 0x00000000);
1541 nv_wr32(priv, 0x404644, 0x00000000);
1542 nv_wr32(priv, 0x404648, 0x00000000);
1543 nv_wr32(priv, 0x40464c, 0x00000000);
1544 nv_wr32(priv, 0x404650, 0x00000000);
1545 nv_wr32(priv, 0x404654, 0x00000000);
1546 nv_wr32(priv, 0x404658, 0x00000000);
1547 nv_wr32(priv, 0x40465c, 0x007f0100);
1548 nv_wr32(priv, 0x404660, 0x00000000);
1549 nv_wr32(priv, 0x404664, 0x00000000);
1550 nv_wr32(priv, 0x404668, 0x00000000);
1551 nv_wr32(priv, 0x40466c, 0x00000000);
1552 nv_wr32(priv, 0x404670, 0x00000000);
1553 nv_wr32(priv, 0x404674, 0x00000000);
1554 nv_wr32(priv, 0x404678, 0x00000000);
1555 nv_wr32(priv, 0x40467c, 0x00000002);
1556 nv_wr32(priv, 0x404680, 0x00000000);
1557 nv_wr32(priv, 0x404684, 0x00000000);
1558 nv_wr32(priv, 0x404688, 0x00000000);
1559 nv_wr32(priv, 0x40468c, 0x00000000);
1560 nv_wr32(priv, 0x404690, 0x00000000);
1561 nv_wr32(priv, 0x404694, 0x00000000);
1562 nv_wr32(priv, 0x404698, 0x00000000);
1563 nv_wr32(priv, 0x40469c, 0x00000000);
1564 nv_wr32(priv, 0x4046a0, 0x007f0080);
1565 nv_wr32(priv, 0x4046a4, 0x00000000);
1566 nv_wr32(priv, 0x4046a8, 0x00000000);
1567 nv_wr32(priv, 0x4046ac, 0x00000000);
1568 nv_wr32(priv, 0x4046b0, 0x00000000);
1569 nv_wr32(priv, 0x4046b4, 0x00000000);
1570 nv_wr32(priv, 0x4046b8, 0x00000000);
1571 nv_wr32(priv, 0x4046bc, 0x00000000);
1572 nv_wr32(priv, 0x4046c0, 0x00000000);
1573 nv_wr32(priv, 0x4046c4, 0x00000000);
1574 nv_wr32(priv, 0x4046c8, 0x00000000);
1575 nv_wr32(priv, 0x4046cc, 0x00000000);
1576 nv_wr32(priv, 0x4046d0, 0x00000000);
1577 nv_wr32(priv, 0x4046d4, 0x00000000);
1578 nv_wr32(priv, 0x4046d8, 0x00000000);
1579 nv_wr32(priv, 0x4046dc, 0x00000000);
1580 nv_wr32(priv, 0x4046e0, 0x00000000);
1581 nv_wr32(priv, 0x4046e4, 0x00000000);
1582 nv_wr32(priv, 0x4046e8, 0x00000000);
1583 nv_wr32(priv, 0x4046f0, 0x00000000);
1584 nv_wr32(priv, 0x4046f4, 0x00000000);
1585}
1586
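/* Unidentified 0x4047xx unit; everything except 0x404734 resets to zero. */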
1587static void
1588nvc0_grctx_generate_unk47xx(struct nvc0_graph_priv *priv)
1589{
1590 nv_wr32(priv, 0x404700, 0x00000000);
1591 nv_wr32(priv, 0x404704, 0x00000000);
1592 nv_wr32(priv, 0x404708, 0x00000000);
1593 nv_wr32(priv, 0x40470c, 0x00000000);
1594 nv_wr32(priv, 0x404710, 0x00000000);
1595 nv_wr32(priv, 0x404714, 0x00000000);
1596 nv_wr32(priv, 0x404718, 0x00000000);
1597 nv_wr32(priv, 0x40471c, 0x00000000);
1598 nv_wr32(priv, 0x404720, 0x00000000);
1599 nv_wr32(priv, 0x404724, 0x00000000);
1600 nv_wr32(priv, 0x404728, 0x00000000);
1601 nv_wr32(priv, 0x40472c, 0x00000000);
1602 nv_wr32(priv, 0x404730, 0x00000000);
1603 nv_wr32(priv, 0x404734, 0x00000100);
1604 nv_wr32(priv, 0x404738, 0x00000000);
1605 nv_wr32(priv, 0x40473c, 0x00000000);
1606 nv_wr32(priv, 0x404740, 0x00000000);
1607 nv_wr32(priv, 0x404744, 0x00000000);
1608 nv_wr32(priv, 0x404748, 0x00000000);
1609 nv_wr32(priv, 0x40474c, 0x00000000);
1610 nv_wr32(priv, 0x404750, 0x00000000);
1611 nv_wr32(priv, 0x404754, 0x00000000);
1612}
1613
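/*
 * Shader-related defaults at 0x4058xx/0x405axx.  Chipsets 0xc1 (GF108) and
 * 0xd9 (GF119) take slightly different values, and 0x405830 is rewritten
 * again further down in nvc0_grctx_generate() via the mmio list.
 */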
1614static void
1615nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv)
1616{
1617
1618 if (nv_device(priv)->chipset == 0xd9) {
1619 nv_wr32(priv, 0x405800, 0x0f8000bf);
1620 nv_wr32(priv, 0x405830, 0x02180218);
1621 nv_wr32(priv, 0x405834, 0x08000000);
1622 } else
1623 if (nv_device(priv)->chipset == 0xc1) {
1624 nv_wr32(priv, 0x405800, 0x0f8000bf);
1625 nv_wr32(priv, 0x405830, 0x02180218);
1626 nv_wr32(priv, 0x405834, 0x00000000);
1627 } else {
1628 nv_wr32(priv, 0x405800, 0x078000bf);
1629 nv_wr32(priv, 0x405830, 0x02180000);
1630 nv_wr32(priv, 0x405834, 0x00000000);
1631 }
1632 nv_wr32(priv, 0x405838, 0x00000000);
1633 nv_wr32(priv, 0x405854, 0x00000000);
1634 nv_wr32(priv, 0x405870, 0x00000001);
1635 nv_wr32(priv, 0x405874, 0x00000001);
1636 nv_wr32(priv, 0x405878, 0x00000001);
1637 nv_wr32(priv, 0x40587c, 0x00000001);
1638 nv_wr32(priv, 0x405a00, 0x00000000);
1639 nv_wr32(priv, 0x405a04, 0x00000000);
1640 nv_wr32(priv, 0x405a18, 0x00000000);
1641}
1642
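/*
 * 0x4060xx defaults; 0x406028 and friends are overwritten later with the
 * per-GPC TPC counts.
 */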
1643static void
1644nvc0_grctx_generate_unk60xx(struct nvc0_graph_priv *priv)
1645{
1646 nv_wr32(priv, 0x406020, 0x000103c1);
1647 nv_wr32(priv, 0x406028, 0x00000001);
1648 nv_wr32(priv, 0x40602c, 0x00000001);
1649 nv_wr32(priv, 0x406030, 0x00000001);
1650 nv_wr32(priv, 0x406034, 0x00000001);
1651}
1652
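/* Unidentified 0x4064xx unit; a few registers exist only on 0xc1/0xd9. */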
1653static void
1654nvc0_grctx_generate_unk64xx(struct nvc0_graph_priv *priv)
1655{
1656
1657 nv_wr32(priv, 0x4064a8, 0x00000000);
1658 nv_wr32(priv, 0x4064ac, 0x00003fff);
1659 nv_wr32(priv, 0x4064b4, 0x00000000);
1660 nv_wr32(priv, 0x4064b8, 0x00000000);
1661 if (nv_device(priv)->chipset == 0xd9)
1662 nv_wr32(priv, 0x4064bc, 0x00000000);
1663 if (nv_device(priv)->chipset == 0xc1 ||
1664 nv_device(priv)->chipset == 0xd9) {
1665 nv_wr32(priv, 0x4064c0, 0x80140078);
1666 nv_wr32(priv, 0x4064c4, 0x0086ffff);
1667 }
1668}
1669
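/*
 * 0x4078xx defaults.  The 0x40780c.. table holds the same placeholder
 * pattern as the per-TPC tables below and is rewritten later with the real
 * TPC->GPC mapping.
 */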
1670static void
1671nvc0_grctx_generate_tpbus(struct nvc0_graph_priv *priv)
1672{
1673 nv_wr32(priv, 0x407804, 0x00000023);
1674 nv_wr32(priv, 0x40780c, 0x0a418820);
1675 nv_wr32(priv, 0x407810, 0x062080e6);
1676 nv_wr32(priv, 0x407814, 0x020398a4);
1677 nv_wr32(priv, 0x407818, 0x0e629062);
1678 nv_wr32(priv, 0x40781c, 0x0a418820);
1679 nv_wr32(priv, 0x407820, 0x000000e6);
1680 nv_wr32(priv, 0x4078bc, 0x00000103);
1681}
1682
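/* CCACHE (0x4080xx) defaults. */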
1683static void
1684nvc0_grctx_generate_ccache(struct nvc0_graph_priv *priv)
1685{
1686 nv_wr32(priv, 0x408000, 0x00000000);
1687 nv_wr32(priv, 0x408004, 0x00000000);
1688 nv_wr32(priv, 0x408008, 0x00000018);
1689 nv_wr32(priv, 0x40800c, 0x00000000);
1690 nv_wr32(priv, 0x408010, 0x00000000);
1691 nv_wr32(priv, 0x408014, 0x00000069);
1692 nv_wr32(priv, 0x408018, 0xe100e100);
1693 nv_wr32(priv, 0x408064, 0x00000000);
1694}
1695
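/*
 * ROP defaults, written through the broadcast window so every ROP unit
 * receives the same values; a few registers differ per chipset.
 */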
1696static void
1697nvc0_grctx_generate_rop(struct nvc0_graph_priv *priv)
1698{
1699 int chipset = nv_device(priv)->chipset;
1700
1701 /* ROPC_BROADCAST */
1702 nv_wr32(priv, 0x408800, 0x02802a3c);
1703 nv_wr32(priv, 0x408804, 0x00000040);
1704 if (chipset == 0xd9) {
1705 nv_wr32(priv, 0x408808, 0x1043e005);
1706 nv_wr32(priv, 0x408900, 0x3080b801);
1707 nv_wr32(priv, 0x408904, 0x1043e005);
1708 nv_wr32(priv, 0x408908, 0x00c8102f);
1709 } else
1710 if (chipset == 0xc1) {
1711 nv_wr32(priv, 0x408808, 0x1003e005);
1712 nv_wr32(priv, 0x408900, 0x3080b801);
1713 nv_wr32(priv, 0x408904, 0x62000001);
1714 nv_wr32(priv, 0x408908, 0x00c80929);
1715 } else {
1716 nv_wr32(priv, 0x408808, 0x0003e00d);
1717 nv_wr32(priv, 0x408900, 0x3080b801);
1718 nv_wr32(priv, 0x408904, 0x02000001);
1719 nv_wr32(priv, 0x408908, 0x00c80929);
1720 }
1721 nv_wr32(priv, 0x40890c, 0x00000000);
1722 nv_wr32(priv, 0x408980, 0x0000011d);
1723}
1724
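/*
 * GPC defaults via the 0x418xxx broadcast window; several values are
 * chipset-dependent (0xc1/0xd9 versus the rest of the Fermi line).
 */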
1725static void
1726nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
1727{
1728 int chipset = nv_device(priv)->chipset;
1729 int i;
1730
1731 /* GPC_BROADCAST */
1732 nv_wr32(priv, 0x418380, 0x00000016);
1733 nv_wr32(priv, 0x418400, 0x38004e00);
1734 nv_wr32(priv, 0x418404, 0x71e0ffff);
1735 nv_wr32(priv, 0x418408, 0x00000000);
1736 nv_wr32(priv, 0x41840c, 0x00001008);
1737 nv_wr32(priv, 0x418410, 0x0fff0fff);
1738 nv_wr32(priv, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
1739 nv_wr32(priv, 0x418450, 0x00000000);
1740 nv_wr32(priv, 0x418454, 0x00000000);
1741 nv_wr32(priv, 0x418458, 0x00000000);
1742 nv_wr32(priv, 0x41845c, 0x00000000);
1743 nv_wr32(priv, 0x418460, 0x00000000);
1744 nv_wr32(priv, 0x418464, 0x00000000);
1745 nv_wr32(priv, 0x418468, 0x00000001);
1746 nv_wr32(priv, 0x41846c, 0x00000000);
1747 nv_wr32(priv, 0x418470, 0x00000000);
1748 nv_wr32(priv, 0x418600, 0x0000001f);
1749 nv_wr32(priv, 0x418684, 0x0000000f);
1750 nv_wr32(priv, 0x418700, 0x00000002);
1751 nv_wr32(priv, 0x418704, 0x00000080);
1752 nv_wr32(priv, 0x418708, 0x00000000);
1753 nv_wr32(priv, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
1754 nv_wr32(priv, 0x418710, 0x00000000);
1755 nv_wr32(priv, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
1756 nv_wr32(priv, 0x418808, 0x00000000);
1757 nv_wr32(priv, 0x41880c, 0x00000000);
1758 nv_wr32(priv, 0x418810, 0x00000000);
1759 nv_wr32(priv, 0x418828, 0x00008442);
1760 if (chipset == 0xc1 || chipset == 0xd9)
1761 nv_wr32(priv, 0x418830, 0x10000001);
1762 else
1763 nv_wr32(priv, 0x418830, 0x00000001);
1764 nv_wr32(priv, 0x4188d8, 0x00000008);
1765 nv_wr32(priv, 0x4188e0, 0x01000000);
1766 nv_wr32(priv, 0x4188e8, 0x00000000);
1767 nv_wr32(priv, 0x4188ec, 0x00000000);
1768 nv_wr32(priv, 0x4188f0, 0x00000000);
1769 nv_wr32(priv, 0x4188f4, 0x00000000);
1770 nv_wr32(priv, 0x4188f8, 0x00000000);
1771 if (chipset == 0xd9)
1772 nv_wr32(priv, 0x4188fc, 0x20100008);
1773 else if (chipset == 0xc1)
1774 nv_wr32(priv, 0x4188fc, 0x00100018);
1775 else
1776 nv_wr32(priv, 0x4188fc, 0x00100000);
1777 nv_wr32(priv, 0x41891c, 0x00ff00ff);
1778 nv_wr32(priv, 0x418924, 0x00000000);
1779 nv_wr32(priv, 0x418928, 0x00ffff00);
1780 nv_wr32(priv, 0x41892c, 0x0000ff00);
1781 for (i = 0; i < 8; i++) {
1782 nv_wr32(priv, 0x418a00 + (i * 0x20), 0x00000000);
1783 nv_wr32(priv, 0x418a04 + (i * 0x20), 0x00000000);
1784 nv_wr32(priv, 0x418a08 + (i * 0x20), 0x00000000);
1785 nv_wr32(priv, 0x418a0c + (i * 0x20), 0x00010000);
1786 nv_wr32(priv, 0x418a10 + (i * 0x20), 0x00000000);
1787 nv_wr32(priv, 0x418a14 + (i * 0x20), 0x00000000);
1788 nv_wr32(priv, 0x418a18 + (i * 0x20), 0x00000000);
1789 }
1790 nv_wr32(priv, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
1791 nv_wr32(priv, 0x418b08, 0x0a418820);
1792 nv_wr32(priv, 0x418b0c, 0x062080e6);
1793 nv_wr32(priv, 0x418b10, 0x020398a4);
1794 nv_wr32(priv, 0x418b14, 0x0e629062);
1795 nv_wr32(priv, 0x418b18, 0x0a418820);
1796 nv_wr32(priv, 0x418b1c, 0x000000e6);
1797 nv_wr32(priv, 0x418bb8, 0x00000103);
1798 nv_wr32(priv, 0x418c08, 0x00000001);
1799 nv_wr32(priv, 0x418c10, 0x00000000);
1800 nv_wr32(priv, 0x418c14, 0x00000000);
1801 nv_wr32(priv, 0x418c18, 0x00000000);
1802 nv_wr32(priv, 0x418c1c, 0x00000000);
1803 nv_wr32(priv, 0x418c20, 0x00000000);
1804 nv_wr32(priv, 0x418c24, 0x00000000);
1805 nv_wr32(priv, 0x418c28, 0x00000000);
1806 nv_wr32(priv, 0x418c2c, 0x00000000);
1807 if (chipset == 0xc1 || chipset == 0xd9)
1808 nv_wr32(priv, 0x418c6c, 0x00000001);
1809 nv_wr32(priv, 0x418c80, 0x20200004);
1810 nv_wr32(priv, 0x418c8c, 0x00000001);
1811 nv_wr32(priv, 0x419000, 0x00000780);
1812 nv_wr32(priv, 0x419004, 0x00000000);
1813 nv_wr32(priv, 0x419008, 0x00000000);
1814 nv_wr32(priv, 0x419014, 0x00000004);
1815}
1816
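/*
 * TPC defaults via the combined GPC/TPC broadcast window at 0x419xxx, so a
 * single write hits the same register in every TPC of every GPC.
 */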
1817static void
1818nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
1819{
1820 int chipset = nv_device(priv)->chipset;
1821
1822 /* GPC_BROADCAST.TP_BROADCAST */
1823 nv_wr32(priv, 0x419818, 0x00000000);
1824 nv_wr32(priv, 0x41983c, 0x00038bc7);
1825 nv_wr32(priv, 0x419848, 0x00000000);
1826 if (chipset == 0xc1 || chipset == 0xd9)
1827 nv_wr32(priv, 0x419864, 0x00000129);
1828 else
1829 nv_wr32(priv, 0x419864, 0x0000012a);
1830 nv_wr32(priv, 0x419888, 0x00000000);
1831 nv_wr32(priv, 0x419a00, 0x000001f0);
1832 nv_wr32(priv, 0x419a04, 0x00000001);
1833 nv_wr32(priv, 0x419a08, 0x00000023);
1834 nv_wr32(priv, 0x419a0c, 0x00020000);
1835 nv_wr32(priv, 0x419a10, 0x00000000);
1836 nv_wr32(priv, 0x419a14, 0x00000200);
1837 nv_wr32(priv, 0x419a1c, 0x00000000);
1838 nv_wr32(priv, 0x419a20, 0x00000800);
1839	if (chipset == 0xd9)
1840		nv_wr32(priv, 0x419ac4, 0x0017f440);
1841	else if (chipset != 0xc0 && chipset != 0xc8)
1842		nv_wr32(priv, 0x419ac4, 0x0007f440);
1843 nv_wr32(priv, 0x419b00, 0x0a418820);
1844 nv_wr32(priv, 0x419b04, 0x062080e6);
1845 nv_wr32(priv, 0x419b08, 0x020398a4);
1846 nv_wr32(priv, 0x419b0c, 0x0e629062);
1847 nv_wr32(priv, 0x419b10, 0x0a418820);
1848 nv_wr32(priv, 0x419b14, 0x000000e6);
1849 nv_wr32(priv, 0x419bd0, 0x00900103);
1850 if (chipset == 0xc1 || chipset == 0xd9)
1851 nv_wr32(priv, 0x419be0, 0x00400001);
1852 else
1853 nv_wr32(priv, 0x419be0, 0x00000001);
1854 nv_wr32(priv, 0x419be4, 0x00000000);
1855 nv_wr32(priv, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
1856 nv_wr32(priv, 0x419c04, 0x00000006);
1857 nv_wr32(priv, 0x419c08, 0x00000002);
1858 nv_wr32(priv, 0x419c20, 0x00000000);
1859	if (chipset == 0xd9) {
1860 nv_wr32(priv, 0x419c24, 0x00084210);
1861 nv_wr32(priv, 0x419c28, 0x3cf3cf3c);
1862 nv_wr32(priv, 0x419cb0, 0x00020048);
1863 } else
1864 if (chipset == 0xce || chipset == 0xcf) {
1865 nv_wr32(priv, 0x419cb0, 0x00020048);
1866 } else {
1867 nv_wr32(priv, 0x419cb0, 0x00060048);
1868 }
1869 nv_wr32(priv, 0x419ce8, 0x00000000);
1870 nv_wr32(priv, 0x419cf4, 0x00000183);
1871 if (chipset == 0xc1 || chipset == 0xd9)
1872 nv_wr32(priv, 0x419d20, 0x12180000);
1873 else
1874 nv_wr32(priv, 0x419d20, 0x02180000);
1875 nv_wr32(priv, 0x419d24, 0x00001fff);
1876 if (chipset == 0xc1 || chipset == 0xd9)
1877 nv_wr32(priv, 0x419d44, 0x02180218);
1878 nv_wr32(priv, 0x419e04, 0x00000000);
1879 nv_wr32(priv, 0x419e08, 0x00000000);
1880 nv_wr32(priv, 0x419e0c, 0x00000000);
1881 nv_wr32(priv, 0x419e10, 0x00000002);
1882 nv_wr32(priv, 0x419e44, 0x001beff2);
1883 nv_wr32(priv, 0x419e48, 0x00000000);
1884 nv_wr32(priv, 0x419e4c, 0x0000000f);
1885 nv_wr32(priv, 0x419e50, 0x00000000);
1886 nv_wr32(priv, 0x419e54, 0x00000000);
1887 nv_wr32(priv, 0x419e58, 0x00000000);
1888 nv_wr32(priv, 0x419e5c, 0x00000000);
1889 nv_wr32(priv, 0x419e60, 0x00000000);
1890 nv_wr32(priv, 0x419e64, 0x00000000);
1891 nv_wr32(priv, 0x419e68, 0x00000000);
1892 nv_wr32(priv, 0x419e6c, 0x00000000);
1893 nv_wr32(priv, 0x419e70, 0x00000000);
1894 nv_wr32(priv, 0x419e74, 0x00000000);
1895 nv_wr32(priv, 0x419e78, 0x00000000);
1896 nv_wr32(priv, 0x419e7c, 0x00000000);
1897 nv_wr32(priv, 0x419e80, 0x00000000);
1898 nv_wr32(priv, 0x419e84, 0x00000000);
1899 nv_wr32(priv, 0x419e88, 0x00000000);
1900 nv_wr32(priv, 0x419e8c, 0x00000000);
1901 nv_wr32(priv, 0x419e90, 0x00000000);
1902 nv_wr32(priv, 0x419e98, 0x00000000);
1903 if (chipset != 0xc0 && chipset != 0xc8)
1904 nv_wr32(priv, 0x419ee0, 0x00011110);
1905 nv_wr32(priv, 0x419f50, 0x00000000);
1906 nv_wr32(priv, 0x419f54, 0x00000000);
1907 if (chipset != 0xc0 && chipset != 0xc8)
1908 nv_wr32(priv, 0x419f58, 0x00000000);
1909}
1910
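/*
 * Build the initial "golden" context image: reset every PGRAPH unit to its
 * defaults, record the per-channel mmio list, distribute TPC ids across
 * the GPCs, and finally replay the per-class default method state.  Bit 0
 * of register 0x000260 is held clear while the image is generated; the
 * saved value is restored once generation completes.
 */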
1911int
1912nvc0_grctx_generate(struct nvc0_graph_priv *priv)
1913{
1914 struct nvc0_grctx info;
1915 int ret, i, gpc, tpc, id;
1916 u32 fermi = nvc0_graph_class(priv);
1917 u32 r000260, tmp;
1918
1919 ret = nvc0_grctx_init(priv, &info);
1920 if (ret)
1921 return ret;
1922
1923 r000260 = nv_rd32(priv, 0x000260);
1924 nv_wr32(priv, 0x000260, r000260 & ~1);
1925 nv_wr32(priv, 0x400208, 0x00000000);
1926
1927 nvc0_grctx_generate_dispatch(priv);
1928 nvc0_grctx_generate_macro(priv);
1929 nvc0_grctx_generate_m2mf(priv);
1930 nvc0_grctx_generate_unk47xx(priv);
1931 nvc0_grctx_generate_shaders(priv);
1932 nvc0_grctx_generate_unk60xx(priv);
1933 nvc0_grctx_generate_unk64xx(priv);
1934 nvc0_grctx_generate_tpbus(priv);
1935 nvc0_grctx_generate_ccache(priv);
1936 nvc0_grctx_generate_rop(priv);
1937 nvc0_grctx_generate_gpc(priv);
1938 nvc0_grctx_generate_tp(priv);
1939
1940 nv_wr32(priv, 0x404154, 0x00000000);
1941
1942 /* generate per-context mmio list data */
1943 mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
1944 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
1945 mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
1946 mmio_list(0x408004, 0x00000000, 8, 0);
1947 mmio_list(0x408008, 0x80000018, 0, 0);
1948 mmio_list(0x40800c, 0x00000000, 8, 1);
1949 mmio_list(0x408010, 0x80000000, 0, 0);
1950 mmio_list(0x418810, 0x80000000, 12, 2);
1951 mmio_list(0x419848, 0x10000000, 12, 2);
1952 mmio_list(0x419004, 0x00000000, 8, 1);
1953 mmio_list(0x419008, 0x00000000, 0, 0);
1954 mmio_list(0x418808, 0x00000000, 8, 0);
1955 mmio_list(0x41880c, 0x80000018, 0, 0);
1956 if (nv_device(priv)->chipset != 0xc1) {
1957 tmp = 0x02180000;
1958 mmio_list(0x405830, tmp, 0, 0);
1959 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
1960 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
1961 u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
1962 mmio_list(reg, tmp, 0, 0);
1963 tmp += 0x0324;
1964 }
1965 }
1966 } else {
1967 tmp = 0x02180000;
1968 mmio_list(0x405830, 0x00000218 | tmp, 0, 0);
1969 mmio_list(0x4064c4, 0x0086ffff, 0, 0);
1970 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
1971 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
1972 u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
1973 mmio_list(reg, 0x10000000 | tmp, 0, 0);
1974 tmp += 0x0324;
1975 }
1976 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
1977 u32 reg = TPC_UNIT(gpc, tpc, 0x0544);
1978 mmio_list(reg, tmp, 0, 0);
1979 tmp += 0x0324;
1980 }
1981 }
1982 }
1983
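	/* hand out sequential ids to the TPCs, round-robin across the GPCs,
	 * and tell each GPC how many TPCs it owns */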
1984 for (tpc = 0, id = 0; tpc < 4; tpc++) {
1985 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
1986 if (tpc < priv->tpc_nr[gpc]) {
1987 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
1988 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id);
1989 nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
1990 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x088), id);
1991 id++;
1992 }
1993
1994 nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
1995 nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
1996 }
1997 }
1998
1999 tmp = 0;
2000 for (i = 0; i < priv->gpc_nr; i++)
2001 tmp |= priv->tpc_nr[i] << (i * 4);
2002 nv_wr32(priv, 0x406028, tmp);
2003 nv_wr32(priv, 0x405870, tmp);
2004
2005 nv_wr32(priv, 0x40602c, 0x00000000);
2006 nv_wr32(priv, 0x405874, 0x00000000);
2007 nv_wr32(priv, 0x406030, 0x00000000);
2008 nv_wr32(priv, 0x405878, 0x00000000);
2009 nv_wr32(priv, 0x406034, 0x00000000);
2010 nv_wr32(priv, 0x40587c, 0x00000000);
2011
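	/* build the TPC -> GPC ownership table and write it out as four
	 * packed words at 0x4060a8.. */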
2012 if (1) {
2013 u8 tpcnr[GPC_MAX], data[TPC_MAX];
2014
2015 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2016 memset(data, 0x1f, sizeof(data));
2017
2018 gpc = -1;
2019 for (tpc = 0; tpc < priv->tpc_total; tpc++) {
2020 do {
2021 gpc = (gpc + 1) % priv->gpc_nr;
2022 } while (!tpcnr[gpc]);
2023 tpcnr[gpc]--;
2024 data[tpc] = gpc;
2025 }
2026
2027 for (i = 0; i < 4; i++)
2028 nv_wr32(priv, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
2029 }
2030
2031 if (1) {
2032 u32 data[6] = {}, data2[2] = {};
2033 u8 tpcnr[GPC_MAX];
2034 u8 shift, ntpcv;
2035
2036		/* first set of magics: the owning GPC of each TPC, packed
		 * 5 bits per TPC, six TPCs per 32-bit word; unused slots are
		 * filled with 7 below */
2037 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2038
2039 gpc = -1;
2040 for (tpc = 0; tpc < priv->tpc_total; tpc++) {
2041 do {
2042 gpc = (gpc + 1) % priv->gpc_nr;
2043 } while (!tpcnr[gpc]);
2044 tpcnr[gpc]--;
2045
2046 data[tpc / 6] |= gpc << ((tpc % 6) * 5);
2047 }
2048
2049 for (; tpc < 32; tpc++)
2050 data[tpc / 6] |= 7 << ((tpc % 6) * 5);
2051
2052		/* and the second: tpc_total shifted up until bit 4 is set
		 * (ntpcv), the shift needed to get there, and a table of
		 * (1 << n) % ntpcv values */
2053 shift = 0;
2054 ntpcv = priv->tpc_total;
2055 while (!(ntpcv & (1 << 4))) {
2056 ntpcv <<= 1;
2057 shift++;
2058 }
2059
2060 data2[0] = (ntpcv << 16);
2061 data2[0] |= (shift << 21);
2062 data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
2063 for (i = 1; i < 7; i++)
2064 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
2065
2066 /* GPC_BROADCAST */
2067 nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
2068 priv->magic_not_rop_nr);
2069 for (i = 0; i < 6; i++)
2070 nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
2071
2072 /* GPC_BROADCAST.TP_BROADCAST */
2073 nv_wr32(priv, 0x419bd0, (priv->tpc_total << 8) |
2074 priv->magic_not_rop_nr |
2075 data2[0]);
2076 nv_wr32(priv, 0x419be4, data2[1]);
2077 for (i = 0; i < 6; i++)
2078 nv_wr32(priv, 0x419b00 + (i * 4), data[i]);
2079
2080 /* UNK78xx */
2081 nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
2082 priv->magic_not_rop_nr);
2083 for (i = 0; i < 6; i++)
2084 nv_wr32(priv, 0x40780c + (i * 4), data[i]);
2085 }
2086
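	/* fill the 32 slots at 0x406800/0x406c00: each slot enables a
	 * growing subset of the TPCs, with the complement of that subset
	 * written to 0x406c00 */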
2087 if (1) {
2088 u32 tpc_mask = 0, tpc_set = 0;
2089 u8 tpcnr[GPC_MAX], a, b;
2090
2091 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2092 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
2093 tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
2094
2095 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
2096 a = (i * (priv->tpc_total - 1)) / 32;
2097 if (a != b) {
2098 b = a;
2099 do {
2100 gpc = (gpc + 1) % priv->gpc_nr;
2101 } while (!tpcnr[gpc]);
2102 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
2103
2104 tpc_set |= 1 << ((gpc * 8) + tpc);
2105 }
2106
2107 nv_wr32(priv, 0x406800 + (i * 0x20), tpc_set);
2108 nv_wr32(priv, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
2109 }
2110 }
2111
2112 nv_wr32(priv, 0x400208, 0x80000000);
2113
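	/* per-class default method values, replayed through the ICMD
	 * interface */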
2114 nv_icmd(priv, 0x00001000, 0x00000004);
2115 nv_icmd(priv, 0x000000a9, 0x0000ffff);
2116 nv_icmd(priv, 0x00000038, 0x0fac6881);
2117 nv_icmd(priv, 0x0000003d, 0x00000001);
2118 nv_icmd(priv, 0x000000e8, 0x00000400);
2119 nv_icmd(priv, 0x000000e9, 0x00000400);
2120 nv_icmd(priv, 0x000000ea, 0x00000400);
2121 nv_icmd(priv, 0x000000eb, 0x00000400);
2122 nv_icmd(priv, 0x000000ec, 0x00000400);
2123 nv_icmd(priv, 0x000000ed, 0x00000400);
2124 nv_icmd(priv, 0x000000ee, 0x00000400);
2125 nv_icmd(priv, 0x000000ef, 0x00000400);
2126 nv_icmd(priv, 0x00000078, 0x00000300);
2127 nv_icmd(priv, 0x00000079, 0x00000300);
2128 nv_icmd(priv, 0x0000007a, 0x00000300);
2129 nv_icmd(priv, 0x0000007b, 0x00000300);
2130 nv_icmd(priv, 0x0000007c, 0x00000300);
2131 nv_icmd(priv, 0x0000007d, 0x00000300);
2132 nv_icmd(priv, 0x0000007e, 0x00000300);
2133 nv_icmd(priv, 0x0000007f, 0x00000300);
2134 nv_icmd(priv, 0x00000050, 0x00000011);
2135 nv_icmd(priv, 0x00000058, 0x00000008);
2136 nv_icmd(priv, 0x00000059, 0x00000008);
2137 nv_icmd(priv, 0x0000005a, 0x00000008);
2138 nv_icmd(priv, 0x0000005b, 0x00000008);
2139 nv_icmd(priv, 0x0000005c, 0x00000008);
2140 nv_icmd(priv, 0x0000005d, 0x00000008);
2141 nv_icmd(priv, 0x0000005e, 0x00000008);
2142 nv_icmd(priv, 0x0000005f, 0x00000008);
2143 nv_icmd(priv, 0x00000208, 0x00000001);
2144 nv_icmd(priv, 0x00000209, 0x00000001);
2145 nv_icmd(priv, 0x0000020a, 0x00000001);
2146 nv_icmd(priv, 0x0000020b, 0x00000001);
2147 nv_icmd(priv, 0x0000020c, 0x00000001);
2148 nv_icmd(priv, 0x0000020d, 0x00000001);
2149 nv_icmd(priv, 0x0000020e, 0x00000001);
2150 nv_icmd(priv, 0x0000020f, 0x00000001);
2151 nv_icmd(priv, 0x00000081, 0x00000001);
2152 nv_icmd(priv, 0x00000085, 0x00000004);
2153 nv_icmd(priv, 0x00000088, 0x00000400);
2154 nv_icmd(priv, 0x00000090, 0x00000300);
2155 nv_icmd(priv, 0x00000098, 0x00001001);
2156 nv_icmd(priv, 0x000000e3, 0x00000001);
2157 nv_icmd(priv, 0x000000da, 0x00000001);
2158 nv_icmd(priv, 0x000000f8, 0x00000003);
2159 nv_icmd(priv, 0x000000fa, 0x00000001);
2160 nv_icmd(priv, 0x0000009f, 0x0000ffff);
2161 nv_icmd(priv, 0x000000a0, 0x0000ffff);
2162 nv_icmd(priv, 0x000000a1, 0x0000ffff);
2163 nv_icmd(priv, 0x000000a2, 0x0000ffff);
2164 nv_icmd(priv, 0x000000b1, 0x00000001);
2165 nv_icmd(priv, 0x000000b2, 0x00000000);
2166 nv_icmd(priv, 0x000000b3, 0x00000000);
2167 nv_icmd(priv, 0x000000b4, 0x00000000);
2168 nv_icmd(priv, 0x000000b5, 0x00000000);
2169 nv_icmd(priv, 0x000000b6, 0x00000000);
2170 nv_icmd(priv, 0x000000b7, 0x00000000);
2171 nv_icmd(priv, 0x000000b8, 0x00000000);
2172 nv_icmd(priv, 0x000000b9, 0x00000000);
2173 nv_icmd(priv, 0x000000ba, 0x00000000);
2174 nv_icmd(priv, 0x000000bb, 0x00000000);
2175 nv_icmd(priv, 0x000000bc, 0x00000000);
2176 nv_icmd(priv, 0x000000bd, 0x00000000);
2177 nv_icmd(priv, 0x000000be, 0x00000000);
2178 nv_icmd(priv, 0x000000bf, 0x00000000);
2179 nv_icmd(priv, 0x000000c0, 0x00000000);
2180 nv_icmd(priv, 0x000000c1, 0x00000000);
2181 nv_icmd(priv, 0x000000c2, 0x00000000);
2182 nv_icmd(priv, 0x000000c3, 0x00000000);
2183 nv_icmd(priv, 0x000000c4, 0x00000000);
2184 nv_icmd(priv, 0x000000c5, 0x00000000);
2185 nv_icmd(priv, 0x000000c6, 0x00000000);
2186 nv_icmd(priv, 0x000000c7, 0x00000000);
2187 nv_icmd(priv, 0x000000c8, 0x00000000);
2188 nv_icmd(priv, 0x000000c9, 0x00000000);
2189 nv_icmd(priv, 0x000000ca, 0x00000000);
2190 nv_icmd(priv, 0x000000cb, 0x00000000);
2191 nv_icmd(priv, 0x000000cc, 0x00000000);
2192 nv_icmd(priv, 0x000000cd, 0x00000000);
2193 nv_icmd(priv, 0x000000ce, 0x00000000);
2194 nv_icmd(priv, 0x000000cf, 0x00000000);
2195 nv_icmd(priv, 0x000000d0, 0x00000000);
2196 nv_icmd(priv, 0x000000d1, 0x00000000);
2197 nv_icmd(priv, 0x000000d2, 0x00000000);
2198 nv_icmd(priv, 0x000000d3, 0x00000000);
2199 nv_icmd(priv, 0x000000d4, 0x00000000);
2200 nv_icmd(priv, 0x000000d5, 0x00000000);
2201 nv_icmd(priv, 0x000000d6, 0x00000000);
2202 nv_icmd(priv, 0x000000d7, 0x00000000);
2203 nv_icmd(priv, 0x000000d8, 0x00000000);
2204 nv_icmd(priv, 0x000000d9, 0x00000000);
2205 nv_icmd(priv, 0x00000210, 0x00000040);
2206 nv_icmd(priv, 0x00000211, 0x00000040);
2207 nv_icmd(priv, 0x00000212, 0x00000040);
2208 nv_icmd(priv, 0x00000213, 0x00000040);
2209 nv_icmd(priv, 0x00000214, 0x00000040);
2210 nv_icmd(priv, 0x00000215, 0x00000040);
2211 nv_icmd(priv, 0x00000216, 0x00000040);
2212 nv_icmd(priv, 0x00000217, 0x00000040);
2213 if (nv_device(priv)->chipset == 0xd9) {
2214 for (i = 0x0400; i <= 0x0417; i++)
2215 nv_icmd(priv, i, 0x00000040);
2216 }
2217 nv_icmd(priv, 0x00000218, 0x0000c080);
2218 nv_icmd(priv, 0x00000219, 0x0000c080);
2219 nv_icmd(priv, 0x0000021a, 0x0000c080);
2220 nv_icmd(priv, 0x0000021b, 0x0000c080);
2221 nv_icmd(priv, 0x0000021c, 0x0000c080);
2222 nv_icmd(priv, 0x0000021d, 0x0000c080);
2223 nv_icmd(priv, 0x0000021e, 0x0000c080);
2224 nv_icmd(priv, 0x0000021f, 0x0000c080);
2225 if (nv_device(priv)->chipset == 0xd9) {
2226 for (i = 0x0440; i <= 0x0457; i++)
2227 nv_icmd(priv, i, 0x0000c080);
2228 }
2229 nv_icmd(priv, 0x000000ad, 0x0000013e);
2230 nv_icmd(priv, 0x000000e1, 0x00000010);
2231 nv_icmd(priv, 0x00000290, 0x00000000);
2232 nv_icmd(priv, 0x00000291, 0x00000000);
2233 nv_icmd(priv, 0x00000292, 0x00000000);
2234 nv_icmd(priv, 0x00000293, 0x00000000);
2235 nv_icmd(priv, 0x00000294, 0x00000000);
2236 nv_icmd(priv, 0x00000295, 0x00000000);
2237 nv_icmd(priv, 0x00000296, 0x00000000);
2238 nv_icmd(priv, 0x00000297, 0x00000000);
2239 nv_icmd(priv, 0x00000298, 0x00000000);
2240 nv_icmd(priv, 0x00000299, 0x00000000);
2241 nv_icmd(priv, 0x0000029a, 0x00000000);
2242 nv_icmd(priv, 0x0000029b, 0x00000000);
2243 nv_icmd(priv, 0x0000029c, 0x00000000);
2244 nv_icmd(priv, 0x0000029d, 0x00000000);
2245 nv_icmd(priv, 0x0000029e, 0x00000000);
2246 nv_icmd(priv, 0x0000029f, 0x00000000);
2247 nv_icmd(priv, 0x000003b0, 0x00000000);
2248 nv_icmd(priv, 0x000003b1, 0x00000000);
2249 nv_icmd(priv, 0x000003b2, 0x00000000);
2250 nv_icmd(priv, 0x000003b3, 0x00000000);
2251 nv_icmd(priv, 0x000003b4, 0x00000000);
2252 nv_icmd(priv, 0x000003b5, 0x00000000);
2253 nv_icmd(priv, 0x000003b6, 0x00000000);
2254 nv_icmd(priv, 0x000003b7, 0x00000000);
2255 nv_icmd(priv, 0x000003b8, 0x00000000);
2256 nv_icmd(priv, 0x000003b9, 0x00000000);
2257 nv_icmd(priv, 0x000003ba, 0x00000000);
2258 nv_icmd(priv, 0x000003bb, 0x00000000);
2259 nv_icmd(priv, 0x000003bc, 0x00000000);
2260 nv_icmd(priv, 0x000003bd, 0x00000000);
2261 nv_icmd(priv, 0x000003be, 0x00000000);
2262 nv_icmd(priv, 0x000003bf, 0x00000000);
2263 nv_icmd(priv, 0x000002a0, 0x00000000);
2264 nv_icmd(priv, 0x000002a1, 0x00000000);
2265 nv_icmd(priv, 0x000002a2, 0x00000000);
2266 nv_icmd(priv, 0x000002a3, 0x00000000);
2267 nv_icmd(priv, 0x000002a4, 0x00000000);
2268 nv_icmd(priv, 0x000002a5, 0x00000000);
2269 nv_icmd(priv, 0x000002a6, 0x00000000);
2270 nv_icmd(priv, 0x000002a7, 0x00000000);
2271 nv_icmd(priv, 0x000002a8, 0x00000000);
2272 nv_icmd(priv, 0x000002a9, 0x00000000);
2273 nv_icmd(priv, 0x000002aa, 0x00000000);
2274 nv_icmd(priv, 0x000002ab, 0x00000000);
2275 nv_icmd(priv, 0x000002ac, 0x00000000);
2276 nv_icmd(priv, 0x000002ad, 0x00000000);
2277 nv_icmd(priv, 0x000002ae, 0x00000000);
2278 nv_icmd(priv, 0x000002af, 0x00000000);
2279 nv_icmd(priv, 0x00000420, 0x00000000);
2280 nv_icmd(priv, 0x00000421, 0x00000000);
2281 nv_icmd(priv, 0x00000422, 0x00000000);
2282 nv_icmd(priv, 0x00000423, 0x00000000);
2283 nv_icmd(priv, 0x00000424, 0x00000000);
2284 nv_icmd(priv, 0x00000425, 0x00000000);
2285 nv_icmd(priv, 0x00000426, 0x00000000);
2286 nv_icmd(priv, 0x00000427, 0x00000000);
2287 nv_icmd(priv, 0x00000428, 0x00000000);
2288 nv_icmd(priv, 0x00000429, 0x00000000);
2289 nv_icmd(priv, 0x0000042a, 0x00000000);
2290 nv_icmd(priv, 0x0000042b, 0x00000000);
2291 nv_icmd(priv, 0x0000042c, 0x00000000);
2292 nv_icmd(priv, 0x0000042d, 0x00000000);
2293 nv_icmd(priv, 0x0000042e, 0x00000000);
2294 nv_icmd(priv, 0x0000042f, 0x00000000);
2295 nv_icmd(priv, 0x000002b0, 0x00000000);
2296 nv_icmd(priv, 0x000002b1, 0x00000000);
2297 nv_icmd(priv, 0x000002b2, 0x00000000);
2298 nv_icmd(priv, 0x000002b3, 0x00000000);
2299 nv_icmd(priv, 0x000002b4, 0x00000000);
2300 nv_icmd(priv, 0x000002b5, 0x00000000);
2301 nv_icmd(priv, 0x000002b6, 0x00000000);
2302 nv_icmd(priv, 0x000002b7, 0x00000000);
2303 nv_icmd(priv, 0x000002b8, 0x00000000);
2304 nv_icmd(priv, 0x000002b9, 0x00000000);
2305 nv_icmd(priv, 0x000002ba, 0x00000000);
2306 nv_icmd(priv, 0x000002bb, 0x00000000);
2307 nv_icmd(priv, 0x000002bc, 0x00000000);
2308 nv_icmd(priv, 0x000002bd, 0x00000000);
2309 nv_icmd(priv, 0x000002be, 0x00000000);
2310 nv_icmd(priv, 0x000002bf, 0x00000000);
2311 nv_icmd(priv, 0x00000430, 0x00000000);
2312 nv_icmd(priv, 0x00000431, 0x00000000);
2313 nv_icmd(priv, 0x00000432, 0x00000000);
2314 nv_icmd(priv, 0x00000433, 0x00000000);
2315 nv_icmd(priv, 0x00000434, 0x00000000);
2316 nv_icmd(priv, 0x00000435, 0x00000000);
2317 nv_icmd(priv, 0x00000436, 0x00000000);
2318 nv_icmd(priv, 0x00000437, 0x00000000);
2319 nv_icmd(priv, 0x00000438, 0x00000000);
2320 nv_icmd(priv, 0x00000439, 0x00000000);
2321 nv_icmd(priv, 0x0000043a, 0x00000000);
2322 nv_icmd(priv, 0x0000043b, 0x00000000);
2323 nv_icmd(priv, 0x0000043c, 0x00000000);
2324 nv_icmd(priv, 0x0000043d, 0x00000000);
2325 nv_icmd(priv, 0x0000043e, 0x00000000);
2326 nv_icmd(priv, 0x0000043f, 0x00000000);
2327 nv_icmd(priv, 0x000002c0, 0x00000000);
2328 nv_icmd(priv, 0x000002c1, 0x00000000);
2329 nv_icmd(priv, 0x000002c2, 0x00000000);
2330 nv_icmd(priv, 0x000002c3, 0x00000000);
2331 nv_icmd(priv, 0x000002c4, 0x00000000);
2332 nv_icmd(priv, 0x000002c5, 0x00000000);
2333 nv_icmd(priv, 0x000002c6, 0x00000000);
2334 nv_icmd(priv, 0x000002c7, 0x00000000);
2335 nv_icmd(priv, 0x000002c8, 0x00000000);
2336 nv_icmd(priv, 0x000002c9, 0x00000000);
2337 nv_icmd(priv, 0x000002ca, 0x00000000);
2338 nv_icmd(priv, 0x000002cb, 0x00000000);
2339 nv_icmd(priv, 0x000002cc, 0x00000000);
2340 nv_icmd(priv, 0x000002cd, 0x00000000);
2341 nv_icmd(priv, 0x000002ce, 0x00000000);
2342 nv_icmd(priv, 0x000002cf, 0x00000000);
2343 nv_icmd(priv, 0x000004d0, 0x00000000);
2344 nv_icmd(priv, 0x000004d1, 0x00000000);
2345 nv_icmd(priv, 0x000004d2, 0x00000000);
2346 nv_icmd(priv, 0x000004d3, 0x00000000);
2347 nv_icmd(priv, 0x000004d4, 0x00000000);
2348 nv_icmd(priv, 0x000004d5, 0x00000000);
2349 nv_icmd(priv, 0x000004d6, 0x00000000);
2350 nv_icmd(priv, 0x000004d7, 0x00000000);
2351 nv_icmd(priv, 0x000004d8, 0x00000000);
2352 nv_icmd(priv, 0x000004d9, 0x00000000);
2353 nv_icmd(priv, 0x000004da, 0x00000000);
2354 nv_icmd(priv, 0x000004db, 0x00000000);
2355 nv_icmd(priv, 0x000004dc, 0x00000000);
2356 nv_icmd(priv, 0x000004dd, 0x00000000);
2357 nv_icmd(priv, 0x000004de, 0x00000000);
2358 nv_icmd(priv, 0x000004df, 0x00000000);
2359 nv_icmd(priv, 0x00000720, 0x00000000);
2360 nv_icmd(priv, 0x00000721, 0x00000000);
2361 nv_icmd(priv, 0x00000722, 0x00000000);
2362 nv_icmd(priv, 0x00000723, 0x00000000);
2363 nv_icmd(priv, 0x00000724, 0x00000000);
2364 nv_icmd(priv, 0x00000725, 0x00000000);
2365 nv_icmd(priv, 0x00000726, 0x00000000);
2366 nv_icmd(priv, 0x00000727, 0x00000000);
2367 nv_icmd(priv, 0x00000728, 0x00000000);
2368 nv_icmd(priv, 0x00000729, 0x00000000);
2369 nv_icmd(priv, 0x0000072a, 0x00000000);
2370 nv_icmd(priv, 0x0000072b, 0x00000000);
2371 nv_icmd(priv, 0x0000072c, 0x00000000);
2372 nv_icmd(priv, 0x0000072d, 0x00000000);
2373 nv_icmd(priv, 0x0000072e, 0x00000000);
2374 nv_icmd(priv, 0x0000072f, 0x00000000);
2375 nv_icmd(priv, 0x000008c0, 0x00000000);
2376 nv_icmd(priv, 0x000008c1, 0x00000000);
2377 nv_icmd(priv, 0x000008c2, 0x00000000);
2378 nv_icmd(priv, 0x000008c3, 0x00000000);
2379 nv_icmd(priv, 0x000008c4, 0x00000000);
2380 nv_icmd(priv, 0x000008c5, 0x00000000);
2381 nv_icmd(priv, 0x000008c6, 0x00000000);
2382 nv_icmd(priv, 0x000008c7, 0x00000000);
2383 nv_icmd(priv, 0x000008c8, 0x00000000);
2384 nv_icmd(priv, 0x000008c9, 0x00000000);
2385 nv_icmd(priv, 0x000008ca, 0x00000000);
2386 nv_icmd(priv, 0x000008cb, 0x00000000);
2387 nv_icmd(priv, 0x000008cc, 0x00000000);
2388 nv_icmd(priv, 0x000008cd, 0x00000000);
2389 nv_icmd(priv, 0x000008ce, 0x00000000);
2390 nv_icmd(priv, 0x000008cf, 0x00000000);
2391 nv_icmd(priv, 0x00000890, 0x00000000);
2392 nv_icmd(priv, 0x00000891, 0x00000000);
2393 nv_icmd(priv, 0x00000892, 0x00000000);
2394 nv_icmd(priv, 0x00000893, 0x00000000);
2395 nv_icmd(priv, 0x00000894, 0x00000000);
2396 nv_icmd(priv, 0x00000895, 0x00000000);
2397 nv_icmd(priv, 0x00000896, 0x00000000);
2398 nv_icmd(priv, 0x00000897, 0x00000000);
2399 nv_icmd(priv, 0x00000898, 0x00000000);
2400 nv_icmd(priv, 0x00000899, 0x00000000);
2401 nv_icmd(priv, 0x0000089a, 0x00000000);
2402 nv_icmd(priv, 0x0000089b, 0x00000000);
2403 nv_icmd(priv, 0x0000089c, 0x00000000);
2404 nv_icmd(priv, 0x0000089d, 0x00000000);
2405 nv_icmd(priv, 0x0000089e, 0x00000000);
2406 nv_icmd(priv, 0x0000089f, 0x00000000);
2407 nv_icmd(priv, 0x000008e0, 0x00000000);
2408 nv_icmd(priv, 0x000008e1, 0x00000000);
2409 nv_icmd(priv, 0x000008e2, 0x00000000);
2410 nv_icmd(priv, 0x000008e3, 0x00000000);
2411 nv_icmd(priv, 0x000008e4, 0x00000000);
2412 nv_icmd(priv, 0x000008e5, 0x00000000);
2413 nv_icmd(priv, 0x000008e6, 0x00000000);
2414 nv_icmd(priv, 0x000008e7, 0x00000000);
2415 nv_icmd(priv, 0x000008e8, 0x00000000);
2416 nv_icmd(priv, 0x000008e9, 0x00000000);
2417 nv_icmd(priv, 0x000008ea, 0x00000000);
2418 nv_icmd(priv, 0x000008eb, 0x00000000);
2419 nv_icmd(priv, 0x000008ec, 0x00000000);
2420 nv_icmd(priv, 0x000008ed, 0x00000000);
2421 nv_icmd(priv, 0x000008ee, 0x00000000);
2422 nv_icmd(priv, 0x000008ef, 0x00000000);
2423 nv_icmd(priv, 0x000008a0, 0x00000000);
2424 nv_icmd(priv, 0x000008a1, 0x00000000);
2425 nv_icmd(priv, 0x000008a2, 0x00000000);
2426 nv_icmd(priv, 0x000008a3, 0x00000000);
2427 nv_icmd(priv, 0x000008a4, 0x00000000);
2428 nv_icmd(priv, 0x000008a5, 0x00000000);
2429 nv_icmd(priv, 0x000008a6, 0x00000000);
2430 nv_icmd(priv, 0x000008a7, 0x00000000);
2431 nv_icmd(priv, 0x000008a8, 0x00000000);
2432 nv_icmd(priv, 0x000008a9, 0x00000000);
2433 nv_icmd(priv, 0x000008aa, 0x00000000);
2434 nv_icmd(priv, 0x000008ab, 0x00000000);
2435 nv_icmd(priv, 0x000008ac, 0x00000000);
2436 nv_icmd(priv, 0x000008ad, 0x00000000);
2437 nv_icmd(priv, 0x000008ae, 0x00000000);
2438 nv_icmd(priv, 0x000008af, 0x00000000);
2439 nv_icmd(priv, 0x000008f0, 0x00000000);
2440 nv_icmd(priv, 0x000008f1, 0x00000000);
2441 nv_icmd(priv, 0x000008f2, 0x00000000);
2442 nv_icmd(priv, 0x000008f3, 0x00000000);
2443 nv_icmd(priv, 0x000008f4, 0x00000000);
2444 nv_icmd(priv, 0x000008f5, 0x00000000);
2445 nv_icmd(priv, 0x000008f6, 0x00000000);
2446 nv_icmd(priv, 0x000008f7, 0x00000000);
2447 nv_icmd(priv, 0x000008f8, 0x00000000);
2448 nv_icmd(priv, 0x000008f9, 0x00000000);
2449 nv_icmd(priv, 0x000008fa, 0x00000000);
2450 nv_icmd(priv, 0x000008fb, 0x00000000);
2451 nv_icmd(priv, 0x000008fc, 0x00000000);
2452 nv_icmd(priv, 0x000008fd, 0x00000000);
2453 nv_icmd(priv, 0x000008fe, 0x00000000);
2454 nv_icmd(priv, 0x000008ff, 0x00000000);
2455 nv_icmd(priv, 0x0000094c, 0x000000ff);
2456 nv_icmd(priv, 0x0000094d, 0xffffffff);
2457 nv_icmd(priv, 0x0000094e, 0x00000002);
2458 nv_icmd(priv, 0x000002ec, 0x00000001);
2459 nv_icmd(priv, 0x00000303, 0x00000001);
2460 nv_icmd(priv, 0x000002e6, 0x00000001);
2461 nv_icmd(priv, 0x00000466, 0x00000052);
2462 nv_icmd(priv, 0x00000301, 0x3f800000);
2463 nv_icmd(priv, 0x00000304, 0x30201000);
2464 nv_icmd(priv, 0x00000305, 0x70605040);
2465 nv_icmd(priv, 0x00000306, 0xb8a89888);
2466 nv_icmd(priv, 0x00000307, 0xf8e8d8c8);
2467 nv_icmd(priv, 0x0000030a, 0x00ffff00);
2468 nv_icmd(priv, 0x0000030b, 0x0000001a);
2469 nv_icmd(priv, 0x0000030c, 0x00000001);
2470 nv_icmd(priv, 0x00000318, 0x00000001);
2471 nv_icmd(priv, 0x00000340, 0x00000000);
2472 nv_icmd(priv, 0x00000375, 0x00000001);
2473 nv_icmd(priv, 0x00000351, 0x00000100);
2474 nv_icmd(priv, 0x0000037d, 0x00000006);
2475 nv_icmd(priv, 0x000003a0, 0x00000002);
2476 nv_icmd(priv, 0x000003aa, 0x00000001);
2477 nv_icmd(priv, 0x000003a9, 0x00000001);
2478 nv_icmd(priv, 0x00000380, 0x00000001);
2479 nv_icmd(priv, 0x00000360, 0x00000040);
2480 nv_icmd(priv, 0x00000366, 0x00000000);
2481 nv_icmd(priv, 0x00000367, 0x00000000);
2482 nv_icmd(priv, 0x00000368, 0x00001fff);
2483 nv_icmd(priv, 0x00000370, 0x00000000);
2484 nv_icmd(priv, 0x00000371, 0x00000000);
2485 nv_icmd(priv, 0x00000372, 0x003fffff);
2486 nv_icmd(priv, 0x0000037a, 0x00000012);
2487 nv_icmd(priv, 0x000005e0, 0x00000022);
2488 nv_icmd(priv, 0x000005e1, 0x00000022);
2489 nv_icmd(priv, 0x000005e2, 0x00000022);
2490 nv_icmd(priv, 0x000005e3, 0x00000022);
2491 nv_icmd(priv, 0x000005e4, 0x00000022);
2492 nv_icmd(priv, 0x00000619, 0x00000003);
2493 nv_icmd(priv, 0x00000811, 0x00000003);
2494 nv_icmd(priv, 0x00000812, 0x00000004);
2495 nv_icmd(priv, 0x00000813, 0x00000006);
2496 nv_icmd(priv, 0x00000814, 0x00000008);
2497 nv_icmd(priv, 0x00000815, 0x0000000b);
2498 nv_icmd(priv, 0x00000800, 0x00000001);
2499 nv_icmd(priv, 0x00000801, 0x00000001);
2500 nv_icmd(priv, 0x00000802, 0x00000001);
2501 nv_icmd(priv, 0x00000803, 0x00000001);
2502 nv_icmd(priv, 0x00000804, 0x00000001);
2503 nv_icmd(priv, 0x00000805, 0x00000001);
2504 nv_icmd(priv, 0x00000632, 0x00000001);
2505 nv_icmd(priv, 0x00000633, 0x00000002);
2506 nv_icmd(priv, 0x00000634, 0x00000003);
2507 nv_icmd(priv, 0x00000635, 0x00000004);
2508 nv_icmd(priv, 0x00000654, 0x3f800000);
2509 nv_icmd(priv, 0x00000657, 0x3f800000);
2510 nv_icmd(priv, 0x00000655, 0x3f800000);
2511 nv_icmd(priv, 0x00000656, 0x3f800000);
2512 nv_icmd(priv, 0x000006cd, 0x3f800000);
2513 nv_icmd(priv, 0x000007f5, 0x3f800000);
2514 nv_icmd(priv, 0x000007dc, 0x39291909);
2515 nv_icmd(priv, 0x000007dd, 0x79695949);
2516 nv_icmd(priv, 0x000007de, 0xb9a99989);
2517 nv_icmd(priv, 0x000007df, 0xf9e9d9c9);
2518 nv_icmd(priv, 0x000007e8, 0x00003210);
2519 nv_icmd(priv, 0x000007e9, 0x00007654);
2520 nv_icmd(priv, 0x000007ea, 0x00000098);
2521 nv_icmd(priv, 0x000007ec, 0x39291909);
2522 nv_icmd(priv, 0x000007ed, 0x79695949);
2523 nv_icmd(priv, 0x000007ee, 0xb9a99989);
2524 nv_icmd(priv, 0x000007ef, 0xf9e9d9c9);
2525 nv_icmd(priv, 0x000007f0, 0x00003210);
2526 nv_icmd(priv, 0x000007f1, 0x00007654);
2527 nv_icmd(priv, 0x000007f2, 0x00000098);
2528 nv_icmd(priv, 0x000005a5, 0x00000001);
2529 nv_icmd(priv, 0x00000980, 0x00000000);
2530 nv_icmd(priv, 0x00000981, 0x00000000);
2531 nv_icmd(priv, 0x00000982, 0x00000000);
2532 nv_icmd(priv, 0x00000983, 0x00000000);
2533 nv_icmd(priv, 0x00000984, 0x00000000);
2534 nv_icmd(priv, 0x00000985, 0x00000000);
2535 nv_icmd(priv, 0x00000986, 0x00000000);
2536 nv_icmd(priv, 0x00000987, 0x00000000);
2537 nv_icmd(priv, 0x00000988, 0x00000000);
2538 nv_icmd(priv, 0x00000989, 0x00000000);
2539 nv_icmd(priv, 0x0000098a, 0x00000000);
2540 nv_icmd(priv, 0x0000098b, 0x00000000);
2541 nv_icmd(priv, 0x0000098c, 0x00000000);
2542 nv_icmd(priv, 0x0000098d, 0x00000000);
2543 nv_icmd(priv, 0x0000098e, 0x00000000);
2544 nv_icmd(priv, 0x0000098f, 0x00000000);
2545 nv_icmd(priv, 0x00000990, 0x00000000);
2546 nv_icmd(priv, 0x00000991, 0x00000000);
2547 nv_icmd(priv, 0x00000992, 0x00000000);
2548 nv_icmd(priv, 0x00000993, 0x00000000);
2549 nv_icmd(priv, 0x00000994, 0x00000000);
2550 nv_icmd(priv, 0x00000995, 0x00000000);
2551 nv_icmd(priv, 0x00000996, 0x00000000);
2552 nv_icmd(priv, 0x00000997, 0x00000000);
2553 nv_icmd(priv, 0x00000998, 0x00000000);
2554 nv_icmd(priv, 0x00000999, 0x00000000);
2555 nv_icmd(priv, 0x0000099a, 0x00000000);
2556 nv_icmd(priv, 0x0000099b, 0x00000000);
2557 nv_icmd(priv, 0x0000099c, 0x00000000);
2558 nv_icmd(priv, 0x0000099d, 0x00000000);
2559 nv_icmd(priv, 0x0000099e, 0x00000000);
2560 nv_icmd(priv, 0x0000099f, 0x00000000);
2561 nv_icmd(priv, 0x000009a0, 0x00000000);
2562 nv_icmd(priv, 0x000009a1, 0x00000000);
2563 nv_icmd(priv, 0x000009a2, 0x00000000);
2564 nv_icmd(priv, 0x000009a3, 0x00000000);
2565 nv_icmd(priv, 0x000009a4, 0x00000000);
2566 nv_icmd(priv, 0x000009a5, 0x00000000);
2567 nv_icmd(priv, 0x000009a6, 0x00000000);
2568 nv_icmd(priv, 0x000009a7, 0x00000000);
2569 nv_icmd(priv, 0x000009a8, 0x00000000);
2570 nv_icmd(priv, 0x000009a9, 0x00000000);
2571 nv_icmd(priv, 0x000009aa, 0x00000000);
2572 nv_icmd(priv, 0x000009ab, 0x00000000);
2573 nv_icmd(priv, 0x000009ac, 0x00000000);
2574 nv_icmd(priv, 0x000009ad, 0x00000000);
2575 nv_icmd(priv, 0x000009ae, 0x00000000);
2576 nv_icmd(priv, 0x000009af, 0x00000000);
2577 nv_icmd(priv, 0x000009b0, 0x00000000);
2578 nv_icmd(priv, 0x000009b1, 0x00000000);
2579 nv_icmd(priv, 0x000009b2, 0x00000000);
2580 nv_icmd(priv, 0x000009b3, 0x00000000);
2581 nv_icmd(priv, 0x000009b4, 0x00000000);
2582 nv_icmd(priv, 0x000009b5, 0x00000000);
2583 nv_icmd(priv, 0x000009b6, 0x00000000);
2584 nv_icmd(priv, 0x000009b7, 0x00000000);
2585 nv_icmd(priv, 0x000009b8, 0x00000000);
2586 nv_icmd(priv, 0x000009b9, 0x00000000);
2587 nv_icmd(priv, 0x000009ba, 0x00000000);
2588 nv_icmd(priv, 0x000009bb, 0x00000000);
2589 nv_icmd(priv, 0x000009bc, 0x00000000);
2590 nv_icmd(priv, 0x000009bd, 0x00000000);
2591 nv_icmd(priv, 0x000009be, 0x00000000);
2592 nv_icmd(priv, 0x000009bf, 0x00000000);
2593 nv_icmd(priv, 0x000009c0, 0x00000000);
2594 nv_icmd(priv, 0x000009c1, 0x00000000);
2595 nv_icmd(priv, 0x000009c2, 0x00000000);
2596 nv_icmd(priv, 0x000009c3, 0x00000000);
2597 nv_icmd(priv, 0x000009c4, 0x00000000);
2598 nv_icmd(priv, 0x000009c5, 0x00000000);
2599 nv_icmd(priv, 0x000009c6, 0x00000000);
2600 nv_icmd(priv, 0x000009c7, 0x00000000);
2601 nv_icmd(priv, 0x000009c8, 0x00000000);
2602 nv_icmd(priv, 0x000009c9, 0x00000000);
2603 nv_icmd(priv, 0x000009ca, 0x00000000);
2604 nv_icmd(priv, 0x000009cb, 0x00000000);
2605 nv_icmd(priv, 0x000009cc, 0x00000000);
2606 nv_icmd(priv, 0x000009cd, 0x00000000);
2607 nv_icmd(priv, 0x000009ce, 0x00000000);
2608 nv_icmd(priv, 0x000009cf, 0x00000000);
2609 nv_icmd(priv, 0x000009d0, 0x00000000);
2610 nv_icmd(priv, 0x000009d1, 0x00000000);
2611 nv_icmd(priv, 0x000009d2, 0x00000000);
2612 nv_icmd(priv, 0x000009d3, 0x00000000);
2613 nv_icmd(priv, 0x000009d4, 0x00000000);
2614 nv_icmd(priv, 0x000009d5, 0x00000000);
2615 nv_icmd(priv, 0x000009d6, 0x00000000);
2616 nv_icmd(priv, 0x000009d7, 0x00000000);
2617 nv_icmd(priv, 0x000009d8, 0x00000000);
2618 nv_icmd(priv, 0x000009d9, 0x00000000);
2619 nv_icmd(priv, 0x000009da, 0x00000000);
2620 nv_icmd(priv, 0x000009db, 0x00000000);
2621 nv_icmd(priv, 0x000009dc, 0x00000000);
2622 nv_icmd(priv, 0x000009dd, 0x00000000);
2623 nv_icmd(priv, 0x000009de, 0x00000000);
2624 nv_icmd(priv, 0x000009df, 0x00000000);
2625 nv_icmd(priv, 0x000009e0, 0x00000000);
2626 nv_icmd(priv, 0x000009e1, 0x00000000);
2627 nv_icmd(priv, 0x000009e2, 0x00000000);
2628 nv_icmd(priv, 0x000009e3, 0x00000000);
2629 nv_icmd(priv, 0x000009e4, 0x00000000);
2630 nv_icmd(priv, 0x000009e5, 0x00000000);
2631 nv_icmd(priv, 0x000009e6, 0x00000000);
2632 nv_icmd(priv, 0x000009e7, 0x00000000);
2633 nv_icmd(priv, 0x000009e8, 0x00000000);
2634 nv_icmd(priv, 0x000009e9, 0x00000000);
2635 nv_icmd(priv, 0x000009ea, 0x00000000);
2636 nv_icmd(priv, 0x000009eb, 0x00000000);
2637 nv_icmd(priv, 0x000009ec, 0x00000000);
2638 nv_icmd(priv, 0x000009ed, 0x00000000);
2639 nv_icmd(priv, 0x000009ee, 0x00000000);
2640 nv_icmd(priv, 0x000009ef, 0x00000000);
2641 nv_icmd(priv, 0x000009f0, 0x00000000);
2642 nv_icmd(priv, 0x000009f1, 0x00000000);
2643 nv_icmd(priv, 0x000009f2, 0x00000000);
2644 nv_icmd(priv, 0x000009f3, 0x00000000);
2645 nv_icmd(priv, 0x000009f4, 0x00000000);
2646 nv_icmd(priv, 0x000009f5, 0x00000000);
2647 nv_icmd(priv, 0x000009f6, 0x00000000);
2648 nv_icmd(priv, 0x000009f7, 0x00000000);
2649 nv_icmd(priv, 0x000009f8, 0x00000000);
2650 nv_icmd(priv, 0x000009f9, 0x00000000);
2651 nv_icmd(priv, 0x000009fa, 0x00000000);
2652 nv_icmd(priv, 0x000009fb, 0x00000000);
2653 nv_icmd(priv, 0x000009fc, 0x00000000);
2654 nv_icmd(priv, 0x000009fd, 0x00000000);
2655 nv_icmd(priv, 0x000009fe, 0x00000000);
2656 nv_icmd(priv, 0x000009ff, 0x00000000);
2657 nv_icmd(priv, 0x00000468, 0x00000004);
2658 nv_icmd(priv, 0x0000046c, 0x00000001);
2659 nv_icmd(priv, 0x00000470, 0x00000000);
2660 nv_icmd(priv, 0x00000471, 0x00000000);
2661 nv_icmd(priv, 0x00000472, 0x00000000);
2662 nv_icmd(priv, 0x00000473, 0x00000000);
2663 nv_icmd(priv, 0x00000474, 0x00000000);
2664 nv_icmd(priv, 0x00000475, 0x00000000);
2665 nv_icmd(priv, 0x00000476, 0x00000000);
2666 nv_icmd(priv, 0x00000477, 0x00000000);
2667 nv_icmd(priv, 0x00000478, 0x00000000);
2668 nv_icmd(priv, 0x00000479, 0x00000000);
2669 nv_icmd(priv, 0x0000047a, 0x00000000);
2670 nv_icmd(priv, 0x0000047b, 0x00000000);
2671 nv_icmd(priv, 0x0000047c, 0x00000000);
2672 nv_icmd(priv, 0x0000047d, 0x00000000);
2673 nv_icmd(priv, 0x0000047e, 0x00000000);
2674 nv_icmd(priv, 0x0000047f, 0x00000000);
2675 nv_icmd(priv, 0x00000480, 0x00000000);
2676 nv_icmd(priv, 0x00000481, 0x00000000);
2677 nv_icmd(priv, 0x00000482, 0x00000000);
2678 nv_icmd(priv, 0x00000483, 0x00000000);
2679 nv_icmd(priv, 0x00000484, 0x00000000);
2680 nv_icmd(priv, 0x00000485, 0x00000000);
2681 nv_icmd(priv, 0x00000486, 0x00000000);
2682 nv_icmd(priv, 0x00000487, 0x00000000);
2683 nv_icmd(priv, 0x00000488, 0x00000000);
2684 nv_icmd(priv, 0x00000489, 0x00000000);
2685 nv_icmd(priv, 0x0000048a, 0x00000000);
2686 nv_icmd(priv, 0x0000048b, 0x00000000);
2687 nv_icmd(priv, 0x0000048c, 0x00000000);
2688 nv_icmd(priv, 0x0000048d, 0x00000000);
2689 nv_icmd(priv, 0x0000048e, 0x00000000);
2690 nv_icmd(priv, 0x0000048f, 0x00000000);
2691 nv_icmd(priv, 0x00000490, 0x00000000);
2692 nv_icmd(priv, 0x00000491, 0x00000000);
2693 nv_icmd(priv, 0x00000492, 0x00000000);
2694 nv_icmd(priv, 0x00000493, 0x00000000);
2695 nv_icmd(priv, 0x00000494, 0x00000000);
2696 nv_icmd(priv, 0x00000495, 0x00000000);
2697 nv_icmd(priv, 0x00000496, 0x00000000);
2698 nv_icmd(priv, 0x00000497, 0x00000000);
2699 nv_icmd(priv, 0x00000498, 0x00000000);
2700 nv_icmd(priv, 0x00000499, 0x00000000);
2701 nv_icmd(priv, 0x0000049a, 0x00000000);
2702 nv_icmd(priv, 0x0000049b, 0x00000000);
2703 nv_icmd(priv, 0x0000049c, 0x00000000);
2704 nv_icmd(priv, 0x0000049d, 0x00000000);
2705 nv_icmd(priv, 0x0000049e, 0x00000000);
2706 nv_icmd(priv, 0x0000049f, 0x00000000);
2707 nv_icmd(priv, 0x000004a0, 0x00000000);
2708 nv_icmd(priv, 0x000004a1, 0x00000000);
2709 nv_icmd(priv, 0x000004a2, 0x00000000);
2710 nv_icmd(priv, 0x000004a3, 0x00000000);
2711 nv_icmd(priv, 0x000004a4, 0x00000000);
2712 nv_icmd(priv, 0x000004a5, 0x00000000);
2713 nv_icmd(priv, 0x000004a6, 0x00000000);
2714 nv_icmd(priv, 0x000004a7, 0x00000000);
2715 nv_icmd(priv, 0x000004a8, 0x00000000);
2716 nv_icmd(priv, 0x000004a9, 0x00000000);
2717 nv_icmd(priv, 0x000004aa, 0x00000000);
2718 nv_icmd(priv, 0x000004ab, 0x00000000);
2719 nv_icmd(priv, 0x000004ac, 0x00000000);
2720 nv_icmd(priv, 0x000004ad, 0x00000000);
2721 nv_icmd(priv, 0x000004ae, 0x00000000);
2722 nv_icmd(priv, 0x000004af, 0x00000000);
2723 nv_icmd(priv, 0x000004b0, 0x00000000);
2724 nv_icmd(priv, 0x000004b1, 0x00000000);
2725 nv_icmd(priv, 0x000004b2, 0x00000000);
2726 nv_icmd(priv, 0x000004b3, 0x00000000);
2727 nv_icmd(priv, 0x000004b4, 0x00000000);
2728 nv_icmd(priv, 0x000004b5, 0x00000000);
2729 nv_icmd(priv, 0x000004b6, 0x00000000);
2730 nv_icmd(priv, 0x000004b7, 0x00000000);
2731 nv_icmd(priv, 0x000004b8, 0x00000000);
2732 nv_icmd(priv, 0x000004b9, 0x00000000);
2733 nv_icmd(priv, 0x000004ba, 0x00000000);
2734 nv_icmd(priv, 0x000004bb, 0x00000000);
2735 nv_icmd(priv, 0x000004bc, 0x00000000);
2736 nv_icmd(priv, 0x000004bd, 0x00000000);
2737 nv_icmd(priv, 0x000004be, 0x00000000);
2738 nv_icmd(priv, 0x000004bf, 0x00000000);
2739 nv_icmd(priv, 0x000004c0, 0x00000000);
2740 nv_icmd(priv, 0x000004c1, 0x00000000);
2741 nv_icmd(priv, 0x000004c2, 0x00000000);
2742 nv_icmd(priv, 0x000004c3, 0x00000000);
2743 nv_icmd(priv, 0x000004c4, 0x00000000);
2744 nv_icmd(priv, 0x000004c5, 0x00000000);
2745 nv_icmd(priv, 0x000004c6, 0x00000000);
2746 nv_icmd(priv, 0x000004c7, 0x00000000);
2747 nv_icmd(priv, 0x000004c8, 0x00000000);
2748 nv_icmd(priv, 0x000004c9, 0x00000000);
2749 nv_icmd(priv, 0x000004ca, 0x00000000);
2750 nv_icmd(priv, 0x000004cb, 0x00000000);
2751 nv_icmd(priv, 0x000004cc, 0x00000000);
2752 nv_icmd(priv, 0x000004cd, 0x00000000);
2753 nv_icmd(priv, 0x000004ce, 0x00000000);
2754 nv_icmd(priv, 0x000004cf, 0x00000000);
2755 nv_icmd(priv, 0x00000510, 0x3f800000);
2756 nv_icmd(priv, 0x00000511, 0x3f800000);
2757 nv_icmd(priv, 0x00000512, 0x3f800000);
2758 nv_icmd(priv, 0x00000513, 0x3f800000);
2759 nv_icmd(priv, 0x00000514, 0x3f800000);
2760 nv_icmd(priv, 0x00000515, 0x3f800000);
2761 nv_icmd(priv, 0x00000516, 0x3f800000);
2762 nv_icmd(priv, 0x00000517, 0x3f800000);
2763 nv_icmd(priv, 0x00000518, 0x3f800000);
2764 nv_icmd(priv, 0x00000519, 0x3f800000);
2765 nv_icmd(priv, 0x0000051a, 0x3f800000);
2766 nv_icmd(priv, 0x0000051b, 0x3f800000);
2767 nv_icmd(priv, 0x0000051c, 0x3f800000);
2768 nv_icmd(priv, 0x0000051d, 0x3f800000);
2769 nv_icmd(priv, 0x0000051e, 0x3f800000);
2770 nv_icmd(priv, 0x0000051f, 0x3f800000);
2771 nv_icmd(priv, 0x00000520, 0x000002b6);
2772 nv_icmd(priv, 0x00000529, 0x00000001);
2773 nv_icmd(priv, 0x00000530, 0xffff0000);
2774 nv_icmd(priv, 0x00000531, 0xffff0000);
2775 nv_icmd(priv, 0x00000532, 0xffff0000);
2776 nv_icmd(priv, 0x00000533, 0xffff0000);
2777 nv_icmd(priv, 0x00000534, 0xffff0000);
2778 nv_icmd(priv, 0x00000535, 0xffff0000);
2779 nv_icmd(priv, 0x00000536, 0xffff0000);
2780 nv_icmd(priv, 0x00000537, 0xffff0000);
2781 nv_icmd(priv, 0x00000538, 0xffff0000);
2782 nv_icmd(priv, 0x00000539, 0xffff0000);
2783 nv_icmd(priv, 0x0000053a, 0xffff0000);
2784 nv_icmd(priv, 0x0000053b, 0xffff0000);
2785 nv_icmd(priv, 0x0000053c, 0xffff0000);
2786 nv_icmd(priv, 0x0000053d, 0xffff0000);
2787 nv_icmd(priv, 0x0000053e, 0xffff0000);
2788 nv_icmd(priv, 0x0000053f, 0xffff0000);
2789 nv_icmd(priv, 0x00000585, 0x0000003f);
2790 nv_icmd(priv, 0x00000576, 0x00000003);
2791 if (nv_device(priv)->chipset == 0xc1 ||
2792 nv_device(priv)->chipset == 0xd9)
2793 nv_icmd(priv, 0x0000057b, 0x00000059);
2794 nv_icmd(priv, 0x00000586, 0x00000040);
2795 nv_icmd(priv, 0x00000582, 0x00000080);
2796 nv_icmd(priv, 0x00000583, 0x00000080);
2797 nv_icmd(priv, 0x000005c2, 0x00000001);
2798 nv_icmd(priv, 0x00000638, 0x00000001);
2799 nv_icmd(priv, 0x00000639, 0x00000001);
2800 nv_icmd(priv, 0x0000063a, 0x00000002);
2801 nv_icmd(priv, 0x0000063b, 0x00000001);
2802 nv_icmd(priv, 0x0000063c, 0x00000001);
2803 nv_icmd(priv, 0x0000063d, 0x00000002);
2804 nv_icmd(priv, 0x0000063e, 0x00000001);
2805 nv_icmd(priv, 0x000008b8, 0x00000001);
2806 nv_icmd(priv, 0x000008b9, 0x00000001);
2807 nv_icmd(priv, 0x000008ba, 0x00000001);
2808 nv_icmd(priv, 0x000008bb, 0x00000001);
2809 nv_icmd(priv, 0x000008bc, 0x00000001);
2810 nv_icmd(priv, 0x000008bd, 0x00000001);
2811 nv_icmd(priv, 0x000008be, 0x00000001);
2812 nv_icmd(priv, 0x000008bf, 0x00000001);
2813 nv_icmd(priv, 0x00000900, 0x00000001);
2814 nv_icmd(priv, 0x00000901, 0x00000001);
2815 nv_icmd(priv, 0x00000902, 0x00000001);
2816 nv_icmd(priv, 0x00000903, 0x00000001);
2817 nv_icmd(priv, 0x00000904, 0x00000001);
2818 nv_icmd(priv, 0x00000905, 0x00000001);
2819 nv_icmd(priv, 0x00000906, 0x00000001);
2820 nv_icmd(priv, 0x00000907, 0x00000001);
2821 nv_icmd(priv, 0x00000908, 0x00000002);
2822 nv_icmd(priv, 0x00000909, 0x00000002);
2823 nv_icmd(priv, 0x0000090a, 0x00000002);
2824 nv_icmd(priv, 0x0000090b, 0x00000002);
2825 nv_icmd(priv, 0x0000090c, 0x00000002);
2826 nv_icmd(priv, 0x0000090d, 0x00000002);
2827 nv_icmd(priv, 0x0000090e, 0x00000002);
2828 nv_icmd(priv, 0x0000090f, 0x00000002);
2829 nv_icmd(priv, 0x00000910, 0x00000001);
2830 nv_icmd(priv, 0x00000911, 0x00000001);
2831 nv_icmd(priv, 0x00000912, 0x00000001);
2832 nv_icmd(priv, 0x00000913, 0x00000001);
2833 nv_icmd(priv, 0x00000914, 0x00000001);
2834 nv_icmd(priv, 0x00000915, 0x00000001);
2835 nv_icmd(priv, 0x00000916, 0x00000001);
2836 nv_icmd(priv, 0x00000917, 0x00000001);
2837 nv_icmd(priv, 0x00000918, 0x00000001);
2838 nv_icmd(priv, 0x00000919, 0x00000001);
2839 nv_icmd(priv, 0x0000091a, 0x00000001);
2840 nv_icmd(priv, 0x0000091b, 0x00000001);
2841 nv_icmd(priv, 0x0000091c, 0x00000001);
2842 nv_icmd(priv, 0x0000091d, 0x00000001);
2843 nv_icmd(priv, 0x0000091e, 0x00000001);
2844 nv_icmd(priv, 0x0000091f, 0x00000001);
2845 nv_icmd(priv, 0x00000920, 0x00000002);
2846 nv_icmd(priv, 0x00000921, 0x00000002);
2847 nv_icmd(priv, 0x00000922, 0x00000002);
2848 nv_icmd(priv, 0x00000923, 0x00000002);
2849 nv_icmd(priv, 0x00000924, 0x00000002);
2850 nv_icmd(priv, 0x00000925, 0x00000002);
2851 nv_icmd(priv, 0x00000926, 0x00000002);
2852 nv_icmd(priv, 0x00000927, 0x00000002);
2853 nv_icmd(priv, 0x00000928, 0x00000001);
2854 nv_icmd(priv, 0x00000929, 0x00000001);
2855 nv_icmd(priv, 0x0000092a, 0x00000001);
2856 nv_icmd(priv, 0x0000092b, 0x00000001);
2857 nv_icmd(priv, 0x0000092c, 0x00000001);
2858 nv_icmd(priv, 0x0000092d, 0x00000001);
2859 nv_icmd(priv, 0x0000092e, 0x00000001);
2860 nv_icmd(priv, 0x0000092f, 0x00000001);
2861 nv_icmd(priv, 0x00000648, 0x00000001);
2862 nv_icmd(priv, 0x00000649, 0x00000001);
2863 nv_icmd(priv, 0x0000064a, 0x00000001);
2864 nv_icmd(priv, 0x0000064b, 0x00000001);
2865 nv_icmd(priv, 0x0000064c, 0x00000001);
2866 nv_icmd(priv, 0x0000064d, 0x00000001);
2867 nv_icmd(priv, 0x0000064e, 0x00000001);
2868 nv_icmd(priv, 0x0000064f, 0x00000001);
2869 nv_icmd(priv, 0x00000650, 0x00000001);
2870 nv_icmd(priv, 0x00000658, 0x0000000f);
2871 nv_icmd(priv, 0x000007ff, 0x0000000a);
2872 nv_icmd(priv, 0x0000066a, 0x40000000);
2873 nv_icmd(priv, 0x0000066b, 0x10000000);
2874 nv_icmd(priv, 0x0000066c, 0xffff0000);
2875 nv_icmd(priv, 0x0000066d, 0xffff0000);
2876 nv_icmd(priv, 0x000007af, 0x00000008);
2877 nv_icmd(priv, 0x000007b0, 0x00000008);
2878 nv_icmd(priv, 0x000007f6, 0x00000001);
2879 nv_icmd(priv, 0x000006b2, 0x00000055);
2880 nv_icmd(priv, 0x000007ad, 0x00000003);
2881 nv_icmd(priv, 0x00000937, 0x00000001);
2882 nv_icmd(priv, 0x00000971, 0x00000008);
2883 nv_icmd(priv, 0x00000972, 0x00000040);
2884 nv_icmd(priv, 0x00000973, 0x0000012c);
2885 nv_icmd(priv, 0x0000097c, 0x00000040);
2886 nv_icmd(priv, 0x00000979, 0x00000003);
2887 nv_icmd(priv, 0x00000975, 0x00000020);
2888 nv_icmd(priv, 0x00000976, 0x00000001);
2889 nv_icmd(priv, 0x00000977, 0x00000020);
2890 nv_icmd(priv, 0x00000978, 0x00000001);
2891 nv_icmd(priv, 0x00000957, 0x00000003);
2892 nv_icmd(priv, 0x0000095e, 0x20164010);
2893 nv_icmd(priv, 0x0000095f, 0x00000020);
2894 if (nv_device(priv)->chipset == 0xd9)
2895 nv_icmd(priv, 0x0000097d, 0x00000020);
2896 nv_icmd(priv, 0x00000683, 0x00000006);
2897 nv_icmd(priv, 0x00000685, 0x003fffff);
2898 nv_icmd(priv, 0x00000687, 0x00000c48);
2899 nv_icmd(priv, 0x000006a0, 0x00000005);
2900 nv_icmd(priv, 0x00000840, 0x00300008);
2901 nv_icmd(priv, 0x00000841, 0x04000080);
2902 nv_icmd(priv, 0x00000842, 0x00300008);
2903 nv_icmd(priv, 0x00000843, 0x04000080);
2904 nv_icmd(priv, 0x00000818, 0x00000000);
2905 nv_icmd(priv, 0x00000819, 0x00000000);
2906 nv_icmd(priv, 0x0000081a, 0x00000000);
2907 nv_icmd(priv, 0x0000081b, 0x00000000);
2908 nv_icmd(priv, 0x0000081c, 0x00000000);
2909 nv_icmd(priv, 0x0000081d, 0x00000000);
2910 nv_icmd(priv, 0x0000081e, 0x00000000);
2911 nv_icmd(priv, 0x0000081f, 0x00000000);
2912 nv_icmd(priv, 0x00000848, 0x00000000);
2913 nv_icmd(priv, 0x00000849, 0x00000000);
2914 nv_icmd(priv, 0x0000084a, 0x00000000);
2915 nv_icmd(priv, 0x0000084b, 0x00000000);
2916 nv_icmd(priv, 0x0000084c, 0x00000000);
2917 nv_icmd(priv, 0x0000084d, 0x00000000);
2918 nv_icmd(priv, 0x0000084e, 0x00000000);
2919 nv_icmd(priv, 0x0000084f, 0x00000000);
2920 nv_icmd(priv, 0x00000850, 0x00000000);
2921 nv_icmd(priv, 0x00000851, 0x00000000);
2922 nv_icmd(priv, 0x00000852, 0x00000000);
2923 nv_icmd(priv, 0x00000853, 0x00000000);
2924 nv_icmd(priv, 0x00000854, 0x00000000);
2925 nv_icmd(priv, 0x00000855, 0x00000000);
2926 nv_icmd(priv, 0x00000856, 0x00000000);
2927 nv_icmd(priv, 0x00000857, 0x00000000);
2928 nv_icmd(priv, 0x00000738, 0x00000000);
2929 nv_icmd(priv, 0x000006aa, 0x00000001);
2930 nv_icmd(priv, 0x000006ab, 0x00000002);
2931 nv_icmd(priv, 0x000006ac, 0x00000080);
2932 nv_icmd(priv, 0x000006ad, 0x00000100);
2933 nv_icmd(priv, 0x000006ae, 0x00000100);
2934 nv_icmd(priv, 0x000006b1, 0x00000011);
2935 nv_icmd(priv, 0x000006bb, 0x000000cf);
2936 nv_icmd(priv, 0x000006ce, 0x2a712488);
2937 nv_icmd(priv, 0x00000739, 0x4085c000);
2938 nv_icmd(priv, 0x0000073a, 0x00000080);
2939 nv_icmd(priv, 0x00000786, 0x80000100);
2940 nv_icmd(priv, 0x0000073c, 0x00010100);
2941 nv_icmd(priv, 0x0000073d, 0x02800000);
2942 nv_icmd(priv, 0x00000787, 0x000000cf);
2943 nv_icmd(priv, 0x0000078c, 0x00000008);
2944 nv_icmd(priv, 0x00000792, 0x00000001);
2945 nv_icmd(priv, 0x00000794, 0x00000001);
2946 nv_icmd(priv, 0x00000795, 0x00000001);
2947 nv_icmd(priv, 0x00000796, 0x00000001);
2948 nv_icmd(priv, 0x00000797, 0x000000cf);
2949 nv_icmd(priv, 0x00000836, 0x00000001);
2950 nv_icmd(priv, 0x0000079a, 0x00000002);
2951 nv_icmd(priv, 0x00000833, 0x04444480);
2952 nv_icmd(priv, 0x000007a1, 0x00000001);
2953 nv_icmd(priv, 0x000007a3, 0x00000001);
2954 nv_icmd(priv, 0x000007a4, 0x00000001);
2955 nv_icmd(priv, 0x000007a5, 0x00000001);
2956 nv_icmd(priv, 0x00000831, 0x00000004);
2957 nv_icmd(priv, 0x0000080c, 0x00000002);
2958 nv_icmd(priv, 0x0000080d, 0x00000100);
2959 nv_icmd(priv, 0x0000080e, 0x00000100);
2960 nv_icmd(priv, 0x0000080f, 0x00000001);
2961 nv_icmd(priv, 0x00000823, 0x00000002);
2962 nv_icmd(priv, 0x00000824, 0x00000100);
2963 nv_icmd(priv, 0x00000825, 0x00000100);
2964 nv_icmd(priv, 0x00000826, 0x00000001);
2965 nv_icmd(priv, 0x0000095d, 0x00000001);
2966 nv_icmd(priv, 0x0000082b, 0x00000004);
2967 nv_icmd(priv, 0x00000942, 0x00010001);
2968 nv_icmd(priv, 0x00000943, 0x00000001);
2969 nv_icmd(priv, 0x00000944, 0x00000022);
2970 nv_icmd(priv, 0x000007c5, 0x00010001);
2971 nv_icmd(priv, 0x00000834, 0x00000001);
2972 nv_icmd(priv, 0x000007c7, 0x00000001);
2973 nv_icmd(priv, 0x0000c1b0, 0x0000000f);
2974 nv_icmd(priv, 0x0000c1b1, 0x0000000f);
2975 nv_icmd(priv, 0x0000c1b2, 0x0000000f);
2976 nv_icmd(priv, 0x0000c1b3, 0x0000000f);
2977 nv_icmd(priv, 0x0000c1b4, 0x0000000f);
2978 nv_icmd(priv, 0x0000c1b5, 0x0000000f);
2979 nv_icmd(priv, 0x0000c1b6, 0x0000000f);
2980 nv_icmd(priv, 0x0000c1b7, 0x0000000f);
2981 nv_icmd(priv, 0x0000c1b8, 0x0fac6881);
2982 nv_icmd(priv, 0x0000c1b9, 0x00fac688);
2983 nv_icmd(priv, 0x0001e100, 0x00000001);
2984 nv_icmd(priv, 0x00001000, 0x00000002);
2985 nv_icmd(priv, 0x000006aa, 0x00000001);
2986 nv_icmd(priv, 0x000006ad, 0x00000100);
2987 nv_icmd(priv, 0x000006ae, 0x00000100);
2988 nv_icmd(priv, 0x000006b1, 0x00000011);
2989 nv_icmd(priv, 0x0000078c, 0x00000008);
2990 nv_icmd(priv, 0x00000792, 0x00000001);
2991 nv_icmd(priv, 0x00000794, 0x00000001);
2992 nv_icmd(priv, 0x00000795, 0x00000001);
2993 nv_icmd(priv, 0x00000796, 0x00000001);
2994 nv_icmd(priv, 0x00000797, 0x000000cf);
2995 nv_icmd(priv, 0x0000079a, 0x00000002);
2996 nv_icmd(priv, 0x00000833, 0x04444480);
2997 nv_icmd(priv, 0x000007a1, 0x00000001);
2998 nv_icmd(priv, 0x000007a3, 0x00000001);
2999 nv_icmd(priv, 0x000007a4, 0x00000001);
3000 nv_icmd(priv, 0x000007a5, 0x00000001);
3001 nv_icmd(priv, 0x00000831, 0x00000004);
3002 nv_icmd(priv, 0x0001e100, 0x00000001);
3003 nv_icmd(priv, 0x00001000, 0x00000014);
3004 nv_icmd(priv, 0x00000351, 0x00000100);
3005 nv_icmd(priv, 0x00000957, 0x00000003);
3006 nv_icmd(priv, 0x0000095d, 0x00000001);
3007 nv_icmd(priv, 0x0000082b, 0x00000004);
3008 nv_icmd(priv, 0x00000942, 0x00010001);
3009 nv_icmd(priv, 0x00000943, 0x00000001);
3010 nv_icmd(priv, 0x000007c5, 0x00010001);
3011 nv_icmd(priv, 0x00000834, 0x00000001);
3012 nv_icmd(priv, 0x000007c7, 0x00000001);
3013 nv_icmd(priv, 0x0001e100, 0x00000001);
3014 nv_icmd(priv, 0x00001000, 0x00000001);
3015 nv_icmd(priv, 0x0000080c, 0x00000002);
3016 nv_icmd(priv, 0x0000080d, 0x00000100);
3017 nv_icmd(priv, 0x0000080e, 0x00000100);
3018 nv_icmd(priv, 0x0000080f, 0x00000001);
3019 nv_icmd(priv, 0x00000823, 0x00000002);
3020 nv_icmd(priv, 0x00000824, 0x00000100);
3021 nv_icmd(priv, 0x00000825, 0x00000100);
3022 nv_icmd(priv, 0x00000826, 0x00000001);
3023 nv_icmd(priv, 0x0001e100, 0x00000001);
3024 nv_wr32(priv, 0x400208, 0x00000000);
3025 nv_wr32(priv, 0x404154, 0x00000400);
3026
3027 nvc0_grctx_generate_9097(priv);
3028 if (fermi >= 0x9197)
3029 nvc0_grctx_generate_9197(priv);
3030 if (fermi >= 0x9297)
3031 nvc0_grctx_generate_9297(priv);
3032 nvc0_grctx_generate_902d(priv);
3033 nvc0_grctx_generate_9039(priv);
3034 nvc0_grctx_generate_90c0(priv);
3035
3036 nv_wr32(priv, 0x000260, r000260);
3037
3038 return nvc0_grctx_fini(&info);
3039}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
new file mode 100644
index 000000000000..6d8c63931ee6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
@@ -0,0 +1,2788 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26
27static void
28nve0_grctx_generate_icmd(struct nvc0_graph_priv *priv)
29{
30 nv_wr32(priv, 0x400208, 0x80000000);
31 nv_icmd(priv, 0x001000, 0x00000004);
32 nv_icmd(priv, 0x000039, 0x00000000);
33 nv_icmd(priv, 0x00003a, 0x00000000);
34 nv_icmd(priv, 0x00003b, 0x00000000);
35 nv_icmd(priv, 0x0000a9, 0x0000ffff);
36 nv_icmd(priv, 0x000038, 0x0fac6881);
37 nv_icmd(priv, 0x00003d, 0x00000001);
38 nv_icmd(priv, 0x0000e8, 0x00000400);
39 nv_icmd(priv, 0x0000e9, 0x00000400);
40 nv_icmd(priv, 0x0000ea, 0x00000400);
41 nv_icmd(priv, 0x0000eb, 0x00000400);
42 nv_icmd(priv, 0x0000ec, 0x00000400);
43 nv_icmd(priv, 0x0000ed, 0x00000400);
44 nv_icmd(priv, 0x0000ee, 0x00000400);
45 nv_icmd(priv, 0x0000ef, 0x00000400);
46 nv_icmd(priv, 0x000078, 0x00000300);
47 nv_icmd(priv, 0x000079, 0x00000300);
48 nv_icmd(priv, 0x00007a, 0x00000300);
49 nv_icmd(priv, 0x00007b, 0x00000300);
50 nv_icmd(priv, 0x00007c, 0x00000300);
51 nv_icmd(priv, 0x00007d, 0x00000300);
52 nv_icmd(priv, 0x00007e, 0x00000300);
53 nv_icmd(priv, 0x00007f, 0x00000300);
54 nv_icmd(priv, 0x000050, 0x00000011);
55 nv_icmd(priv, 0x000058, 0x00000008);
56 nv_icmd(priv, 0x000059, 0x00000008);
57 nv_icmd(priv, 0x00005a, 0x00000008);
58 nv_icmd(priv, 0x00005b, 0x00000008);
59 nv_icmd(priv, 0x00005c, 0x00000008);
60 nv_icmd(priv, 0x00005d, 0x00000008);
61 nv_icmd(priv, 0x00005e, 0x00000008);
62 nv_icmd(priv, 0x00005f, 0x00000008);
63 nv_icmd(priv, 0x000208, 0x00000001);
64 nv_icmd(priv, 0x000209, 0x00000001);
65 nv_icmd(priv, 0x00020a, 0x00000001);
66 nv_icmd(priv, 0x00020b, 0x00000001);
67 nv_icmd(priv, 0x00020c, 0x00000001);
68 nv_icmd(priv, 0x00020d, 0x00000001);
69 nv_icmd(priv, 0x00020e, 0x00000001);
70 nv_icmd(priv, 0x00020f, 0x00000001);
71 nv_icmd(priv, 0x000081, 0x00000001);
72 nv_icmd(priv, 0x000085, 0x00000004);
73 nv_icmd(priv, 0x000088, 0x00000400);
74 nv_icmd(priv, 0x000090, 0x00000300);
75 nv_icmd(priv, 0x000098, 0x00001001);
76 nv_icmd(priv, 0x0000e3, 0x00000001);
77 nv_icmd(priv, 0x0000da, 0x00000001);
78 nv_icmd(priv, 0x0000f8, 0x00000003);
79 nv_icmd(priv, 0x0000fa, 0x00000001);
80 nv_icmd(priv, 0x00009f, 0x0000ffff);
81 nv_icmd(priv, 0x0000a0, 0x0000ffff);
82 nv_icmd(priv, 0x0000a1, 0x0000ffff);
83 nv_icmd(priv, 0x0000a2, 0x0000ffff);
84 nv_icmd(priv, 0x0000b1, 0x00000001);
85 nv_icmd(priv, 0x0000ad, 0x0000013e);
86 nv_icmd(priv, 0x0000e1, 0x00000010);
87 nv_icmd(priv, 0x000290, 0x00000000);
88 nv_icmd(priv, 0x000291, 0x00000000);
89 nv_icmd(priv, 0x000292, 0x00000000);
90 nv_icmd(priv, 0x000293, 0x00000000);
91 nv_icmd(priv, 0x000294, 0x00000000);
92 nv_icmd(priv, 0x000295, 0x00000000);
93 nv_icmd(priv, 0x000296, 0x00000000);
94 nv_icmd(priv, 0x000297, 0x00000000);
95 nv_icmd(priv, 0x000298, 0x00000000);
96 nv_icmd(priv, 0x000299, 0x00000000);
97 nv_icmd(priv, 0x00029a, 0x00000000);
98 nv_icmd(priv, 0x00029b, 0x00000000);
99 nv_icmd(priv, 0x00029c, 0x00000000);
100 nv_icmd(priv, 0x00029d, 0x00000000);
101 nv_icmd(priv, 0x00029e, 0x00000000);
102 nv_icmd(priv, 0x00029f, 0x00000000);
103 nv_icmd(priv, 0x0003b0, 0x00000000);
104 nv_icmd(priv, 0x0003b1, 0x00000000);
105 nv_icmd(priv, 0x0003b2, 0x00000000);
106 nv_icmd(priv, 0x0003b3, 0x00000000);
107 nv_icmd(priv, 0x0003b4, 0x00000000);
108 nv_icmd(priv, 0x0003b5, 0x00000000);
109 nv_icmd(priv, 0x0003b6, 0x00000000);
110 nv_icmd(priv, 0x0003b7, 0x00000000);
111 nv_icmd(priv, 0x0003b8, 0x00000000);
112 nv_icmd(priv, 0x0003b9, 0x00000000);
113 nv_icmd(priv, 0x0003ba, 0x00000000);
114 nv_icmd(priv, 0x0003bb, 0x00000000);
115 nv_icmd(priv, 0x0003bc, 0x00000000);
116 nv_icmd(priv, 0x0003bd, 0x00000000);
117 nv_icmd(priv, 0x0003be, 0x00000000);
118 nv_icmd(priv, 0x0003bf, 0x00000000);
119 nv_icmd(priv, 0x0002a0, 0x00000000);
120 nv_icmd(priv, 0x0002a1, 0x00000000);
121 nv_icmd(priv, 0x0002a2, 0x00000000);
122 nv_icmd(priv, 0x0002a3, 0x00000000);
123 nv_icmd(priv, 0x0002a4, 0x00000000);
124 nv_icmd(priv, 0x0002a5, 0x00000000);
125 nv_icmd(priv, 0x0002a6, 0x00000000);
126 nv_icmd(priv, 0x0002a7, 0x00000000);
127 nv_icmd(priv, 0x0002a8, 0x00000000);
128 nv_icmd(priv, 0x0002a9, 0x00000000);
129 nv_icmd(priv, 0x0002aa, 0x00000000);
130 nv_icmd(priv, 0x0002ab, 0x00000000);
131 nv_icmd(priv, 0x0002ac, 0x00000000);
132 nv_icmd(priv, 0x0002ad, 0x00000000);
133 nv_icmd(priv, 0x0002ae, 0x00000000);
134 nv_icmd(priv, 0x0002af, 0x00000000);
135 nv_icmd(priv, 0x000420, 0x00000000);
136 nv_icmd(priv, 0x000421, 0x00000000);
137 nv_icmd(priv, 0x000422, 0x00000000);
138 nv_icmd(priv, 0x000423, 0x00000000);
139 nv_icmd(priv, 0x000424, 0x00000000);
140 nv_icmd(priv, 0x000425, 0x00000000);
141 nv_icmd(priv, 0x000426, 0x00000000);
142 nv_icmd(priv, 0x000427, 0x00000000);
143 nv_icmd(priv, 0x000428, 0x00000000);
144 nv_icmd(priv, 0x000429, 0x00000000);
145 nv_icmd(priv, 0x00042a, 0x00000000);
146 nv_icmd(priv, 0x00042b, 0x00000000);
147 nv_icmd(priv, 0x00042c, 0x00000000);
148 nv_icmd(priv, 0x00042d, 0x00000000);
149 nv_icmd(priv, 0x00042e, 0x00000000);
150 nv_icmd(priv, 0x00042f, 0x00000000);
151 nv_icmd(priv, 0x0002b0, 0x00000000);
152 nv_icmd(priv, 0x0002b1, 0x00000000);
153 nv_icmd(priv, 0x0002b2, 0x00000000);
154 nv_icmd(priv, 0x0002b3, 0x00000000);
155 nv_icmd(priv, 0x0002b4, 0x00000000);
156 nv_icmd(priv, 0x0002b5, 0x00000000);
157 nv_icmd(priv, 0x0002b6, 0x00000000);
158 nv_icmd(priv, 0x0002b7, 0x00000000);
159 nv_icmd(priv, 0x0002b8, 0x00000000);
160 nv_icmd(priv, 0x0002b9, 0x00000000);
161 nv_icmd(priv, 0x0002ba, 0x00000000);
162 nv_icmd(priv, 0x0002bb, 0x00000000);
163 nv_icmd(priv, 0x0002bc, 0x00000000);
164 nv_icmd(priv, 0x0002bd, 0x00000000);
165 nv_icmd(priv, 0x0002be, 0x00000000);
166 nv_icmd(priv, 0x0002bf, 0x00000000);
167 nv_icmd(priv, 0x000430, 0x00000000);
168 nv_icmd(priv, 0x000431, 0x00000000);
169 nv_icmd(priv, 0x000432, 0x00000000);
170 nv_icmd(priv, 0x000433, 0x00000000);
171 nv_icmd(priv, 0x000434, 0x00000000);
172 nv_icmd(priv, 0x000435, 0x00000000);
173 nv_icmd(priv, 0x000436, 0x00000000);
174 nv_icmd(priv, 0x000437, 0x00000000);
175 nv_icmd(priv, 0x000438, 0x00000000);
176 nv_icmd(priv, 0x000439, 0x00000000);
177 nv_icmd(priv, 0x00043a, 0x00000000);
178 nv_icmd(priv, 0x00043b, 0x00000000);
179 nv_icmd(priv, 0x00043c, 0x00000000);
180 nv_icmd(priv, 0x00043d, 0x00000000);
181 nv_icmd(priv, 0x00043e, 0x00000000);
182 nv_icmd(priv, 0x00043f, 0x00000000);
183 nv_icmd(priv, 0x0002c0, 0x00000000);
184 nv_icmd(priv, 0x0002c1, 0x00000000);
185 nv_icmd(priv, 0x0002c2, 0x00000000);
186 nv_icmd(priv, 0x0002c3, 0x00000000);
187 nv_icmd(priv, 0x0002c4, 0x00000000);
188 nv_icmd(priv, 0x0002c5, 0x00000000);
189 nv_icmd(priv, 0x0002c6, 0x00000000);
190 nv_icmd(priv, 0x0002c7, 0x00000000);
191 nv_icmd(priv, 0x0002c8, 0x00000000);
192 nv_icmd(priv, 0x0002c9, 0x00000000);
193 nv_icmd(priv, 0x0002ca, 0x00000000);
194 nv_icmd(priv, 0x0002cb, 0x00000000);
195 nv_icmd(priv, 0x0002cc, 0x00000000);
196 nv_icmd(priv, 0x0002cd, 0x00000000);
197 nv_icmd(priv, 0x0002ce, 0x00000000);
198 nv_icmd(priv, 0x0002cf, 0x00000000);
199 nv_icmd(priv, 0x0004d0, 0x00000000);
200 nv_icmd(priv, 0x0004d1, 0x00000000);
201 nv_icmd(priv, 0x0004d2, 0x00000000);
202 nv_icmd(priv, 0x0004d3, 0x00000000);
203 nv_icmd(priv, 0x0004d4, 0x00000000);
204 nv_icmd(priv, 0x0004d5, 0x00000000);
205 nv_icmd(priv, 0x0004d6, 0x00000000);
206 nv_icmd(priv, 0x0004d7, 0x00000000);
207 nv_icmd(priv, 0x0004d8, 0x00000000);
208 nv_icmd(priv, 0x0004d9, 0x00000000);
209 nv_icmd(priv, 0x0004da, 0x00000000);
210 nv_icmd(priv, 0x0004db, 0x00000000);
211 nv_icmd(priv, 0x0004dc, 0x00000000);
212 nv_icmd(priv, 0x0004dd, 0x00000000);
213 nv_icmd(priv, 0x0004de, 0x00000000);
214 nv_icmd(priv, 0x0004df, 0x00000000);
215 nv_icmd(priv, 0x000720, 0x00000000);
216 nv_icmd(priv, 0x000721, 0x00000000);
217 nv_icmd(priv, 0x000722, 0x00000000);
218 nv_icmd(priv, 0x000723, 0x00000000);
219 nv_icmd(priv, 0x000724, 0x00000000);
220 nv_icmd(priv, 0x000725, 0x00000000);
221 nv_icmd(priv, 0x000726, 0x00000000);
222 nv_icmd(priv, 0x000727, 0x00000000);
223 nv_icmd(priv, 0x000728, 0x00000000);
224 nv_icmd(priv, 0x000729, 0x00000000);
225 nv_icmd(priv, 0x00072a, 0x00000000);
226 nv_icmd(priv, 0x00072b, 0x00000000);
227 nv_icmd(priv, 0x00072c, 0x00000000);
228 nv_icmd(priv, 0x00072d, 0x00000000);
229 nv_icmd(priv, 0x00072e, 0x00000000);
230 nv_icmd(priv, 0x00072f, 0x00000000);
231 nv_icmd(priv, 0x0008c0, 0x00000000);
232 nv_icmd(priv, 0x0008c1, 0x00000000);
233 nv_icmd(priv, 0x0008c2, 0x00000000);
234 nv_icmd(priv, 0x0008c3, 0x00000000);
235 nv_icmd(priv, 0x0008c4, 0x00000000);
236 nv_icmd(priv, 0x0008c5, 0x00000000);
237 nv_icmd(priv, 0x0008c6, 0x00000000);
238 nv_icmd(priv, 0x0008c7, 0x00000000);
239 nv_icmd(priv, 0x0008c8, 0x00000000);
240 nv_icmd(priv, 0x0008c9, 0x00000000);
241 nv_icmd(priv, 0x0008ca, 0x00000000);
242 nv_icmd(priv, 0x0008cb, 0x00000000);
243 nv_icmd(priv, 0x0008cc, 0x00000000);
244 nv_icmd(priv, 0x0008cd, 0x00000000);
245 nv_icmd(priv, 0x0008ce, 0x00000000);
246 nv_icmd(priv, 0x0008cf, 0x00000000);
247 nv_icmd(priv, 0x000890, 0x00000000);
248 nv_icmd(priv, 0x000891, 0x00000000);
249 nv_icmd(priv, 0x000892, 0x00000000);
250 nv_icmd(priv, 0x000893, 0x00000000);
251 nv_icmd(priv, 0x000894, 0x00000000);
252 nv_icmd(priv, 0x000895, 0x00000000);
253 nv_icmd(priv, 0x000896, 0x00000000);
254 nv_icmd(priv, 0x000897, 0x00000000);
255 nv_icmd(priv, 0x000898, 0x00000000);
256 nv_icmd(priv, 0x000899, 0x00000000);
257 nv_icmd(priv, 0x00089a, 0x00000000);
258 nv_icmd(priv, 0x00089b, 0x00000000);
259 nv_icmd(priv, 0x00089c, 0x00000000);
260 nv_icmd(priv, 0x00089d, 0x00000000);
261 nv_icmd(priv, 0x00089e, 0x00000000);
262 nv_icmd(priv, 0x00089f, 0x00000000);
263 nv_icmd(priv, 0x0008e0, 0x00000000);
264 nv_icmd(priv, 0x0008e1, 0x00000000);
265 nv_icmd(priv, 0x0008e2, 0x00000000);
266 nv_icmd(priv, 0x0008e3, 0x00000000);
267 nv_icmd(priv, 0x0008e4, 0x00000000);
268 nv_icmd(priv, 0x0008e5, 0x00000000);
269 nv_icmd(priv, 0x0008e6, 0x00000000);
270 nv_icmd(priv, 0x0008e7, 0x00000000);
271 nv_icmd(priv, 0x0008e8, 0x00000000);
272 nv_icmd(priv, 0x0008e9, 0x00000000);
273 nv_icmd(priv, 0x0008ea, 0x00000000);
274 nv_icmd(priv, 0x0008eb, 0x00000000);
275 nv_icmd(priv, 0x0008ec, 0x00000000);
276 nv_icmd(priv, 0x0008ed, 0x00000000);
277 nv_icmd(priv, 0x0008ee, 0x00000000);
278 nv_icmd(priv, 0x0008ef, 0x00000000);
279 nv_icmd(priv, 0x0008a0, 0x00000000);
280 nv_icmd(priv, 0x0008a1, 0x00000000);
281 nv_icmd(priv, 0x0008a2, 0x00000000);
282 nv_icmd(priv, 0x0008a3, 0x00000000);
283 nv_icmd(priv, 0x0008a4, 0x00000000);
284 nv_icmd(priv, 0x0008a5, 0x00000000);
285 nv_icmd(priv, 0x0008a6, 0x00000000);
286 nv_icmd(priv, 0x0008a7, 0x00000000);
287 nv_icmd(priv, 0x0008a8, 0x00000000);
288 nv_icmd(priv, 0x0008a9, 0x00000000);
289 nv_icmd(priv, 0x0008aa, 0x00000000);
290 nv_icmd(priv, 0x0008ab, 0x00000000);
291 nv_icmd(priv, 0x0008ac, 0x00000000);
292 nv_icmd(priv, 0x0008ad, 0x00000000);
293 nv_icmd(priv, 0x0008ae, 0x00000000);
294 nv_icmd(priv, 0x0008af, 0x00000000);
295 nv_icmd(priv, 0x0008f0, 0x00000000);
296 nv_icmd(priv, 0x0008f1, 0x00000000);
297 nv_icmd(priv, 0x0008f2, 0x00000000);
298 nv_icmd(priv, 0x0008f3, 0x00000000);
299 nv_icmd(priv, 0x0008f4, 0x00000000);
300 nv_icmd(priv, 0x0008f5, 0x00000000);
301 nv_icmd(priv, 0x0008f6, 0x00000000);
302 nv_icmd(priv, 0x0008f7, 0x00000000);
303 nv_icmd(priv, 0x0008f8, 0x00000000);
304 nv_icmd(priv, 0x0008f9, 0x00000000);
305 nv_icmd(priv, 0x0008fa, 0x00000000);
306 nv_icmd(priv, 0x0008fb, 0x00000000);
307 nv_icmd(priv, 0x0008fc, 0x00000000);
308 nv_icmd(priv, 0x0008fd, 0x00000000);
309 nv_icmd(priv, 0x0008fe, 0x00000000);
310 nv_icmd(priv, 0x0008ff, 0x00000000);
311 nv_icmd(priv, 0x00094c, 0x000000ff);
312 nv_icmd(priv, 0x00094d, 0xffffffff);
313 nv_icmd(priv, 0x00094e, 0x00000002);
314 nv_icmd(priv, 0x0002ec, 0x00000001);
315 nv_icmd(priv, 0x000303, 0x00000001);
316 nv_icmd(priv, 0x0002e6, 0x00000001);
317 nv_icmd(priv, 0x000466, 0x00000052);
318 nv_icmd(priv, 0x000301, 0x3f800000);
319 nv_icmd(priv, 0x000304, 0x30201000);
320 nv_icmd(priv, 0x000305, 0x70605040);
321 nv_icmd(priv, 0x000306, 0xb8a89888);
322 nv_icmd(priv, 0x000307, 0xf8e8d8c8);
323 nv_icmd(priv, 0x00030a, 0x00ffff00);
324 nv_icmd(priv, 0x00030b, 0x0000001a);
325 nv_icmd(priv, 0x00030c, 0x00000001);
326 nv_icmd(priv, 0x000318, 0x00000001);
327 nv_icmd(priv, 0x000340, 0x00000000);
328 nv_icmd(priv, 0x000375, 0x00000001);
329 nv_icmd(priv, 0x00037d, 0x00000006);
330 nv_icmd(priv, 0x0003a0, 0x00000002);
331 nv_icmd(priv, 0x0003aa, 0x00000001);
332 nv_icmd(priv, 0x0003a9, 0x00000001);
333 nv_icmd(priv, 0x000380, 0x00000001);
334 nv_icmd(priv, 0x000383, 0x00000011);
335 nv_icmd(priv, 0x000360, 0x00000040);
336 nv_icmd(priv, 0x000366, 0x00000000);
337 nv_icmd(priv, 0x000367, 0x00000000);
338 nv_icmd(priv, 0x000368, 0x00000fff);
339 nv_icmd(priv, 0x000370, 0x00000000);
340 nv_icmd(priv, 0x000371, 0x00000000);
341 nv_icmd(priv, 0x000372, 0x000fffff);
342 nv_icmd(priv, 0x00037a, 0x00000012);
343 nv_icmd(priv, 0x000619, 0x00000003);
344 nv_icmd(priv, 0x000811, 0x00000003);
345 nv_icmd(priv, 0x000812, 0x00000004);
346 nv_icmd(priv, 0x000813, 0x00000006);
347 nv_icmd(priv, 0x000814, 0x00000008);
348 nv_icmd(priv, 0x000815, 0x0000000b);
349 nv_icmd(priv, 0x000800, 0x00000001);
350 nv_icmd(priv, 0x000801, 0x00000001);
351 nv_icmd(priv, 0x000802, 0x00000001);
352 nv_icmd(priv, 0x000803, 0x00000001);
353 nv_icmd(priv, 0x000804, 0x00000001);
354 nv_icmd(priv, 0x000805, 0x00000001);
355 nv_icmd(priv, 0x000632, 0x00000001);
356 nv_icmd(priv, 0x000633, 0x00000002);
357 nv_icmd(priv, 0x000634, 0x00000003);
358 nv_icmd(priv, 0x000635, 0x00000004);
359 nv_icmd(priv, 0x000654, 0x3f800000);
360 nv_icmd(priv, 0x000657, 0x3f800000);
361 nv_icmd(priv, 0x000655, 0x3f800000);
362 nv_icmd(priv, 0x000656, 0x3f800000);
363 nv_icmd(priv, 0x0006cd, 0x3f800000);
364 nv_icmd(priv, 0x0007f5, 0x3f800000);
365 nv_icmd(priv, 0x0007dc, 0x39291909);
366 nv_icmd(priv, 0x0007dd, 0x79695949);
367 nv_icmd(priv, 0x0007de, 0xb9a99989);
368 nv_icmd(priv, 0x0007df, 0xf9e9d9c9);
369 nv_icmd(priv, 0x0007e8, 0x00003210);
370 nv_icmd(priv, 0x0007e9, 0x00007654);
371 nv_icmd(priv, 0x0007ea, 0x00000098);
372 nv_icmd(priv, 0x0007ec, 0x39291909);
373 nv_icmd(priv, 0x0007ed, 0x79695949);
374 nv_icmd(priv, 0x0007ee, 0xb9a99989);
375 nv_icmd(priv, 0x0007ef, 0xf9e9d9c9);
376 nv_icmd(priv, 0x0007f0, 0x00003210);
377 nv_icmd(priv, 0x0007f1, 0x00007654);
378 nv_icmd(priv, 0x0007f2, 0x00000098);
379 nv_icmd(priv, 0x0005a5, 0x00000001);
380 nv_icmd(priv, 0x000980, 0x00000000);
381 nv_icmd(priv, 0x000981, 0x00000000);
382 nv_icmd(priv, 0x000982, 0x00000000);
383 nv_icmd(priv, 0x000983, 0x00000000);
384 nv_icmd(priv, 0x000984, 0x00000000);
385 nv_icmd(priv, 0x000985, 0x00000000);
386 nv_icmd(priv, 0x000986, 0x00000000);
387 nv_icmd(priv, 0x000987, 0x00000000);
388 nv_icmd(priv, 0x000988, 0x00000000);
389 nv_icmd(priv, 0x000989, 0x00000000);
390 nv_icmd(priv, 0x00098a, 0x00000000);
391 nv_icmd(priv, 0x00098b, 0x00000000);
392 nv_icmd(priv, 0x00098c, 0x00000000);
393 nv_icmd(priv, 0x00098d, 0x00000000);
394 nv_icmd(priv, 0x00098e, 0x00000000);
395 nv_icmd(priv, 0x00098f, 0x00000000);
396 nv_icmd(priv, 0x000990, 0x00000000);
397 nv_icmd(priv, 0x000991, 0x00000000);
398 nv_icmd(priv, 0x000992, 0x00000000);
399 nv_icmd(priv, 0x000993, 0x00000000);
400 nv_icmd(priv, 0x000994, 0x00000000);
401 nv_icmd(priv, 0x000995, 0x00000000);
402 nv_icmd(priv, 0x000996, 0x00000000);
403 nv_icmd(priv, 0x000997, 0x00000000);
404 nv_icmd(priv, 0x000998, 0x00000000);
405 nv_icmd(priv, 0x000999, 0x00000000);
406 nv_icmd(priv, 0x00099a, 0x00000000);
407 nv_icmd(priv, 0x00099b, 0x00000000);
408 nv_icmd(priv, 0x00099c, 0x00000000);
409 nv_icmd(priv, 0x00099d, 0x00000000);
410 nv_icmd(priv, 0x00099e, 0x00000000);
411 nv_icmd(priv, 0x00099f, 0x00000000);
412 nv_icmd(priv, 0x0009a0, 0x00000000);
413 nv_icmd(priv, 0x0009a1, 0x00000000);
414 nv_icmd(priv, 0x0009a2, 0x00000000);
415 nv_icmd(priv, 0x0009a3, 0x00000000);
416 nv_icmd(priv, 0x0009a4, 0x00000000);
417 nv_icmd(priv, 0x0009a5, 0x00000000);
418 nv_icmd(priv, 0x0009a6, 0x00000000);
419 nv_icmd(priv, 0x0009a7, 0x00000000);
420 nv_icmd(priv, 0x0009a8, 0x00000000);
421 nv_icmd(priv, 0x0009a9, 0x00000000);
422 nv_icmd(priv, 0x0009aa, 0x00000000);
423 nv_icmd(priv, 0x0009ab, 0x00000000);
424 nv_icmd(priv, 0x0009ac, 0x00000000);
425 nv_icmd(priv, 0x0009ad, 0x00000000);
426 nv_icmd(priv, 0x0009ae, 0x00000000);
427 nv_icmd(priv, 0x0009af, 0x00000000);
428 nv_icmd(priv, 0x0009b0, 0x00000000);
429 nv_icmd(priv, 0x0009b1, 0x00000000);
430 nv_icmd(priv, 0x0009b2, 0x00000000);
431 nv_icmd(priv, 0x0009b3, 0x00000000);
432 nv_icmd(priv, 0x0009b4, 0x00000000);
433 nv_icmd(priv, 0x0009b5, 0x00000000);
434 nv_icmd(priv, 0x0009b6, 0x00000000);
435 nv_icmd(priv, 0x0009b7, 0x00000000);
436 nv_icmd(priv, 0x0009b8, 0x00000000);
437 nv_icmd(priv, 0x0009b9, 0x00000000);
438 nv_icmd(priv, 0x0009ba, 0x00000000);
439 nv_icmd(priv, 0x0009bb, 0x00000000);
440 nv_icmd(priv, 0x0009bc, 0x00000000);
441 nv_icmd(priv, 0x0009bd, 0x00000000);
442 nv_icmd(priv, 0x0009be, 0x00000000);
443 nv_icmd(priv, 0x0009bf, 0x00000000);
444 nv_icmd(priv, 0x0009c0, 0x00000000);
445 nv_icmd(priv, 0x0009c1, 0x00000000);
446 nv_icmd(priv, 0x0009c2, 0x00000000);
447 nv_icmd(priv, 0x0009c3, 0x00000000);
448 nv_icmd(priv, 0x0009c4, 0x00000000);
449 nv_icmd(priv, 0x0009c5, 0x00000000);
450 nv_icmd(priv, 0x0009c6, 0x00000000);
451 nv_icmd(priv, 0x0009c7, 0x00000000);
452 nv_icmd(priv, 0x0009c8, 0x00000000);
453 nv_icmd(priv, 0x0009c9, 0x00000000);
454 nv_icmd(priv, 0x0009ca, 0x00000000);
455 nv_icmd(priv, 0x0009cb, 0x00000000);
456 nv_icmd(priv, 0x0009cc, 0x00000000);
457 nv_icmd(priv, 0x0009cd, 0x00000000);
458 nv_icmd(priv, 0x0009ce, 0x00000000);
459 nv_icmd(priv, 0x0009cf, 0x00000000);
460 nv_icmd(priv, 0x0009d0, 0x00000000);
461 nv_icmd(priv, 0x0009d1, 0x00000000);
462 nv_icmd(priv, 0x0009d2, 0x00000000);
463 nv_icmd(priv, 0x0009d3, 0x00000000);
464 nv_icmd(priv, 0x0009d4, 0x00000000);
465 nv_icmd(priv, 0x0009d5, 0x00000000);
466 nv_icmd(priv, 0x0009d6, 0x00000000);
467 nv_icmd(priv, 0x0009d7, 0x00000000);
468 nv_icmd(priv, 0x0009d8, 0x00000000);
469 nv_icmd(priv, 0x0009d9, 0x00000000);
470 nv_icmd(priv, 0x0009da, 0x00000000);
471 nv_icmd(priv, 0x0009db, 0x00000000);
472 nv_icmd(priv, 0x0009dc, 0x00000000);
473 nv_icmd(priv, 0x0009dd, 0x00000000);
474 nv_icmd(priv, 0x0009de, 0x00000000);
475 nv_icmd(priv, 0x0009df, 0x00000000);
476 nv_icmd(priv, 0x0009e0, 0x00000000);
477 nv_icmd(priv, 0x0009e1, 0x00000000);
478 nv_icmd(priv, 0x0009e2, 0x00000000);
479 nv_icmd(priv, 0x0009e3, 0x00000000);
480 nv_icmd(priv, 0x0009e4, 0x00000000);
481 nv_icmd(priv, 0x0009e5, 0x00000000);
482 nv_icmd(priv, 0x0009e6, 0x00000000);
483 nv_icmd(priv, 0x0009e7, 0x00000000);
484 nv_icmd(priv, 0x0009e8, 0x00000000);
485 nv_icmd(priv, 0x0009e9, 0x00000000);
486 nv_icmd(priv, 0x0009ea, 0x00000000);
487 nv_icmd(priv, 0x0009eb, 0x00000000);
488 nv_icmd(priv, 0x0009ec, 0x00000000);
489 nv_icmd(priv, 0x0009ed, 0x00000000);
490 nv_icmd(priv, 0x0009ee, 0x00000000);
491 nv_icmd(priv, 0x0009ef, 0x00000000);
492 nv_icmd(priv, 0x0009f0, 0x00000000);
493 nv_icmd(priv, 0x0009f1, 0x00000000);
494 nv_icmd(priv, 0x0009f2, 0x00000000);
495 nv_icmd(priv, 0x0009f3, 0x00000000);
496 nv_icmd(priv, 0x0009f4, 0x00000000);
497 nv_icmd(priv, 0x0009f5, 0x00000000);
498 nv_icmd(priv, 0x0009f6, 0x00000000);
499 nv_icmd(priv, 0x0009f7, 0x00000000);
500 nv_icmd(priv, 0x0009f8, 0x00000000);
501 nv_icmd(priv, 0x0009f9, 0x00000000);
502 nv_icmd(priv, 0x0009fa, 0x00000000);
503 nv_icmd(priv, 0x0009fb, 0x00000000);
504 nv_icmd(priv, 0x0009fc, 0x00000000);
505 nv_icmd(priv, 0x0009fd, 0x00000000);
506 nv_icmd(priv, 0x0009fe, 0x00000000);
507 nv_icmd(priv, 0x0009ff, 0x00000000);
508 nv_icmd(priv, 0x000468, 0x00000004);
509 nv_icmd(priv, 0x00046c, 0x00000001);
510 nv_icmd(priv, 0x000470, 0x00000000);
511 nv_icmd(priv, 0x000471, 0x00000000);
512 nv_icmd(priv, 0x000472, 0x00000000);
513 nv_icmd(priv, 0x000473, 0x00000000);
514 nv_icmd(priv, 0x000474, 0x00000000);
515 nv_icmd(priv, 0x000475, 0x00000000);
516 nv_icmd(priv, 0x000476, 0x00000000);
517 nv_icmd(priv, 0x000477, 0x00000000);
518 nv_icmd(priv, 0x000478, 0x00000000);
519 nv_icmd(priv, 0x000479, 0x00000000);
520 nv_icmd(priv, 0x00047a, 0x00000000);
521 nv_icmd(priv, 0x00047b, 0x00000000);
522 nv_icmd(priv, 0x00047c, 0x00000000);
523 nv_icmd(priv, 0x00047d, 0x00000000);
524 nv_icmd(priv, 0x00047e, 0x00000000);
525 nv_icmd(priv, 0x00047f, 0x00000000);
526 nv_icmd(priv, 0x000480, 0x00000000);
527 nv_icmd(priv, 0x000481, 0x00000000);
528 nv_icmd(priv, 0x000482, 0x00000000);
529 nv_icmd(priv, 0x000483, 0x00000000);
530 nv_icmd(priv, 0x000484, 0x00000000);
531 nv_icmd(priv, 0x000485, 0x00000000);
532 nv_icmd(priv, 0x000486, 0x00000000);
533 nv_icmd(priv, 0x000487, 0x00000000);
534 nv_icmd(priv, 0x000488, 0x00000000);
535 nv_icmd(priv, 0x000489, 0x00000000);
536 nv_icmd(priv, 0x00048a, 0x00000000);
537 nv_icmd(priv, 0x00048b, 0x00000000);
538 nv_icmd(priv, 0x00048c, 0x00000000);
539 nv_icmd(priv, 0x00048d, 0x00000000);
540 nv_icmd(priv, 0x00048e, 0x00000000);
541 nv_icmd(priv, 0x00048f, 0x00000000);
542 nv_icmd(priv, 0x000490, 0x00000000);
543 nv_icmd(priv, 0x000491, 0x00000000);
544 nv_icmd(priv, 0x000492, 0x00000000);
545 nv_icmd(priv, 0x000493, 0x00000000);
546 nv_icmd(priv, 0x000494, 0x00000000);
547 nv_icmd(priv, 0x000495, 0x00000000);
548 nv_icmd(priv, 0x000496, 0x00000000);
549 nv_icmd(priv, 0x000497, 0x00000000);
550 nv_icmd(priv, 0x000498, 0x00000000);
551 nv_icmd(priv, 0x000499, 0x00000000);
552 nv_icmd(priv, 0x00049a, 0x00000000);
553 nv_icmd(priv, 0x00049b, 0x00000000);
554 nv_icmd(priv, 0x00049c, 0x00000000);
555 nv_icmd(priv, 0x00049d, 0x00000000);
556 nv_icmd(priv, 0x00049e, 0x00000000);
557 nv_icmd(priv, 0x00049f, 0x00000000);
558 nv_icmd(priv, 0x0004a0, 0x00000000);
559 nv_icmd(priv, 0x0004a1, 0x00000000);
560 nv_icmd(priv, 0x0004a2, 0x00000000);
561 nv_icmd(priv, 0x0004a3, 0x00000000);
562 nv_icmd(priv, 0x0004a4, 0x00000000);
563 nv_icmd(priv, 0x0004a5, 0x00000000);
564 nv_icmd(priv, 0x0004a6, 0x00000000);
565 nv_icmd(priv, 0x0004a7, 0x00000000);
566 nv_icmd(priv, 0x0004a8, 0x00000000);
567 nv_icmd(priv, 0x0004a9, 0x00000000);
568 nv_icmd(priv, 0x0004aa, 0x00000000);
569 nv_icmd(priv, 0x0004ab, 0x00000000);
570 nv_icmd(priv, 0x0004ac, 0x00000000);
571 nv_icmd(priv, 0x0004ad, 0x00000000);
572 nv_icmd(priv, 0x0004ae, 0x00000000);
573 nv_icmd(priv, 0x0004af, 0x00000000);
574 nv_icmd(priv, 0x0004b0, 0x00000000);
575 nv_icmd(priv, 0x0004b1, 0x00000000);
576 nv_icmd(priv, 0x0004b2, 0x00000000);
577 nv_icmd(priv, 0x0004b3, 0x00000000);
578 nv_icmd(priv, 0x0004b4, 0x00000000);
579 nv_icmd(priv, 0x0004b5, 0x00000000);
580 nv_icmd(priv, 0x0004b6, 0x00000000);
581 nv_icmd(priv, 0x0004b7, 0x00000000);
582 nv_icmd(priv, 0x0004b8, 0x00000000);
583 nv_icmd(priv, 0x0004b9, 0x00000000);
584 nv_icmd(priv, 0x0004ba, 0x00000000);
585 nv_icmd(priv, 0x0004bb, 0x00000000);
586 nv_icmd(priv, 0x0004bc, 0x00000000);
587 nv_icmd(priv, 0x0004bd, 0x00000000);
588 nv_icmd(priv, 0x0004be, 0x00000000);
589 nv_icmd(priv, 0x0004bf, 0x00000000);
590 nv_icmd(priv, 0x0004c0, 0x00000000);
591 nv_icmd(priv, 0x0004c1, 0x00000000);
592 nv_icmd(priv, 0x0004c2, 0x00000000);
593 nv_icmd(priv, 0x0004c3, 0x00000000);
594 nv_icmd(priv, 0x0004c4, 0x00000000);
595 nv_icmd(priv, 0x0004c5, 0x00000000);
596 nv_icmd(priv, 0x0004c6, 0x00000000);
597 nv_icmd(priv, 0x0004c7, 0x00000000);
598 nv_icmd(priv, 0x0004c8, 0x00000000);
599 nv_icmd(priv, 0x0004c9, 0x00000000);
600 nv_icmd(priv, 0x0004ca, 0x00000000);
601 nv_icmd(priv, 0x0004cb, 0x00000000);
602 nv_icmd(priv, 0x0004cc, 0x00000000);
603 nv_icmd(priv, 0x0004cd, 0x00000000);
604 nv_icmd(priv, 0x0004ce, 0x00000000);
605 nv_icmd(priv, 0x0004cf, 0x00000000);
606 nv_icmd(priv, 0x000510, 0x3f800000);
607 nv_icmd(priv, 0x000511, 0x3f800000);
608 nv_icmd(priv, 0x000512, 0x3f800000);
609 nv_icmd(priv, 0x000513, 0x3f800000);
610 nv_icmd(priv, 0x000514, 0x3f800000);
611 nv_icmd(priv, 0x000515, 0x3f800000);
612 nv_icmd(priv, 0x000516, 0x3f800000);
613 nv_icmd(priv, 0x000517, 0x3f800000);
614 nv_icmd(priv, 0x000518, 0x3f800000);
615 nv_icmd(priv, 0x000519, 0x3f800000);
616 nv_icmd(priv, 0x00051a, 0x3f800000);
617 nv_icmd(priv, 0x00051b, 0x3f800000);
618 nv_icmd(priv, 0x00051c, 0x3f800000);
619 nv_icmd(priv, 0x00051d, 0x3f800000);
620 nv_icmd(priv, 0x00051e, 0x3f800000);
621 nv_icmd(priv, 0x00051f, 0x3f800000);
622 nv_icmd(priv, 0x000520, 0x000002b6);
623 nv_icmd(priv, 0x000529, 0x00000001);
624 nv_icmd(priv, 0x000530, 0xffff0000);
625 nv_icmd(priv, 0x000531, 0xffff0000);
626 nv_icmd(priv, 0x000532, 0xffff0000);
627 nv_icmd(priv, 0x000533, 0xffff0000);
628 nv_icmd(priv, 0x000534, 0xffff0000);
629 nv_icmd(priv, 0x000535, 0xffff0000);
630 nv_icmd(priv, 0x000536, 0xffff0000);
631 nv_icmd(priv, 0x000537, 0xffff0000);
632 nv_icmd(priv, 0x000538, 0xffff0000);
633 nv_icmd(priv, 0x000539, 0xffff0000);
634 nv_icmd(priv, 0x00053a, 0xffff0000);
635 nv_icmd(priv, 0x00053b, 0xffff0000);
636 nv_icmd(priv, 0x00053c, 0xffff0000);
637 nv_icmd(priv, 0x00053d, 0xffff0000);
638 nv_icmd(priv, 0x00053e, 0xffff0000);
639 nv_icmd(priv, 0x00053f, 0xffff0000);
640 nv_icmd(priv, 0x000585, 0x0000003f);
641 nv_icmd(priv, 0x000576, 0x00000003);
642 nv_icmd(priv, 0x00057b, 0x00000059);
643 nv_icmd(priv, 0x000586, 0x00000040);
644 nv_icmd(priv, 0x000582, 0x00000080);
645 nv_icmd(priv, 0x000583, 0x00000080);
646 nv_icmd(priv, 0x0005c2, 0x00000001);
647 nv_icmd(priv, 0x000638, 0x00000001);
648 nv_icmd(priv, 0x000639, 0x00000001);
649 nv_icmd(priv, 0x00063a, 0x00000002);
650 nv_icmd(priv, 0x00063b, 0x00000001);
651 nv_icmd(priv, 0x00063c, 0x00000001);
652 nv_icmd(priv, 0x00063d, 0x00000002);
653 nv_icmd(priv, 0x00063e, 0x00000001);
654 nv_icmd(priv, 0x0008b8, 0x00000001);
655 nv_icmd(priv, 0x0008b9, 0x00000001);
656 nv_icmd(priv, 0x0008ba, 0x00000001);
657 nv_icmd(priv, 0x0008bb, 0x00000001);
658 nv_icmd(priv, 0x0008bc, 0x00000001);
659 nv_icmd(priv, 0x0008bd, 0x00000001);
660 nv_icmd(priv, 0x0008be, 0x00000001);
661 nv_icmd(priv, 0x0008bf, 0x00000001);
662 nv_icmd(priv, 0x000900, 0x00000001);
663 nv_icmd(priv, 0x000901, 0x00000001);
664 nv_icmd(priv, 0x000902, 0x00000001);
665 nv_icmd(priv, 0x000903, 0x00000001);
666 nv_icmd(priv, 0x000904, 0x00000001);
667 nv_icmd(priv, 0x000905, 0x00000001);
668 nv_icmd(priv, 0x000906, 0x00000001);
669 nv_icmd(priv, 0x000907, 0x00000001);
670 nv_icmd(priv, 0x000908, 0x00000002);
671 nv_icmd(priv, 0x000909, 0x00000002);
672 nv_icmd(priv, 0x00090a, 0x00000002);
673 nv_icmd(priv, 0x00090b, 0x00000002);
674 nv_icmd(priv, 0x00090c, 0x00000002);
675 nv_icmd(priv, 0x00090d, 0x00000002);
676 nv_icmd(priv, 0x00090e, 0x00000002);
677 nv_icmd(priv, 0x00090f, 0x00000002);
678 nv_icmd(priv, 0x000910, 0x00000001);
679 nv_icmd(priv, 0x000911, 0x00000001);
680 nv_icmd(priv, 0x000912, 0x00000001);
681 nv_icmd(priv, 0x000913, 0x00000001);
682 nv_icmd(priv, 0x000914, 0x00000001);
683 nv_icmd(priv, 0x000915, 0x00000001);
684 nv_icmd(priv, 0x000916, 0x00000001);
685 nv_icmd(priv, 0x000917, 0x00000001);
686 nv_icmd(priv, 0x000918, 0x00000001);
687 nv_icmd(priv, 0x000919, 0x00000001);
688 nv_icmd(priv, 0x00091a, 0x00000001);
689 nv_icmd(priv, 0x00091b, 0x00000001);
690 nv_icmd(priv, 0x00091c, 0x00000001);
691 nv_icmd(priv, 0x00091d, 0x00000001);
692 nv_icmd(priv, 0x00091e, 0x00000001);
693 nv_icmd(priv, 0x00091f, 0x00000001);
694 nv_icmd(priv, 0x000920, 0x00000002);
695 nv_icmd(priv, 0x000921, 0x00000002);
696 nv_icmd(priv, 0x000922, 0x00000002);
697 nv_icmd(priv, 0x000923, 0x00000002);
698 nv_icmd(priv, 0x000924, 0x00000002);
699 nv_icmd(priv, 0x000925, 0x00000002);
700 nv_icmd(priv, 0x000926, 0x00000002);
701 nv_icmd(priv, 0x000927, 0x00000002);
702 nv_icmd(priv, 0x000928, 0x00000001);
703 nv_icmd(priv, 0x000929, 0x00000001);
704 nv_icmd(priv, 0x00092a, 0x00000001);
705 nv_icmd(priv, 0x00092b, 0x00000001);
706 nv_icmd(priv, 0x00092c, 0x00000001);
707 nv_icmd(priv, 0x00092d, 0x00000001);
708 nv_icmd(priv, 0x00092e, 0x00000001);
709 nv_icmd(priv, 0x00092f, 0x00000001);
710 nv_icmd(priv, 0x000648, 0x00000001);
711 nv_icmd(priv, 0x000649, 0x00000001);
712 nv_icmd(priv, 0x00064a, 0x00000001);
713 nv_icmd(priv, 0x00064b, 0x00000001);
714 nv_icmd(priv, 0x00064c, 0x00000001);
715 nv_icmd(priv, 0x00064d, 0x00000001);
716 nv_icmd(priv, 0x00064e, 0x00000001);
717 nv_icmd(priv, 0x00064f, 0x00000001);
718 nv_icmd(priv, 0x000650, 0x00000001);
719 nv_icmd(priv, 0x000658, 0x0000000f);
720 nv_icmd(priv, 0x0007ff, 0x0000000a);
721 nv_icmd(priv, 0x00066a, 0x40000000);
722 nv_icmd(priv, 0x00066b, 0x10000000);
723 nv_icmd(priv, 0x00066c, 0xffff0000);
724 nv_icmd(priv, 0x00066d, 0xffff0000);
725 nv_icmd(priv, 0x0007af, 0x00000008);
726 nv_icmd(priv, 0x0007b0, 0x00000008);
727 nv_icmd(priv, 0x0007f6, 0x00000001);
728 nv_icmd(priv, 0x0006b2, 0x00000055);
729 nv_icmd(priv, 0x0007ad, 0x00000003);
730 nv_icmd(priv, 0x000937, 0x00000001);
731 nv_icmd(priv, 0x000971, 0x00000008);
732 nv_icmd(priv, 0x000972, 0x00000040);
733 nv_icmd(priv, 0x000973, 0x0000012c);
734 nv_icmd(priv, 0x00097c, 0x00000040);
735 nv_icmd(priv, 0x000979, 0x00000003);
736 nv_icmd(priv, 0x000975, 0x00000020);
737 nv_icmd(priv, 0x000976, 0x00000001);
738 nv_icmd(priv, 0x000977, 0x00000020);
739 nv_icmd(priv, 0x000978, 0x00000001);
740 nv_icmd(priv, 0x000957, 0x00000003);
741 nv_icmd(priv, 0x00095e, 0x20164010);
742 nv_icmd(priv, 0x00095f, 0x00000020);
743 nv_icmd(priv, 0x00097d, 0x00000020);
744 nv_icmd(priv, 0x000683, 0x00000006);
745 nv_icmd(priv, 0x000685, 0x003fffff);
746 nv_icmd(priv, 0x000687, 0x003fffff);
747 nv_icmd(priv, 0x0006a0, 0x00000005);
748 nv_icmd(priv, 0x000840, 0x00400008);
749 nv_icmd(priv, 0x000841, 0x08000080);
750 nv_icmd(priv, 0x000842, 0x00400008);
751 nv_icmd(priv, 0x000843, 0x08000080);
752 nv_icmd(priv, 0x000818, 0x00000000);
753 nv_icmd(priv, 0x000819, 0x00000000);
754 nv_icmd(priv, 0x00081a, 0x00000000);
755 nv_icmd(priv, 0x00081b, 0x00000000);
756 nv_icmd(priv, 0x00081c, 0x00000000);
757 nv_icmd(priv, 0x00081d, 0x00000000);
758 nv_icmd(priv, 0x00081e, 0x00000000);
759 nv_icmd(priv, 0x00081f, 0x00000000);
760 nv_icmd(priv, 0x000848, 0x00000000);
761 nv_icmd(priv, 0x000849, 0x00000000);
762 nv_icmd(priv, 0x00084a, 0x00000000);
763 nv_icmd(priv, 0x00084b, 0x00000000);
764 nv_icmd(priv, 0x00084c, 0x00000000);
765 nv_icmd(priv, 0x00084d, 0x00000000);
766 nv_icmd(priv, 0x00084e, 0x00000000);
767 nv_icmd(priv, 0x00084f, 0x00000000);
768 nv_icmd(priv, 0x000850, 0x00000000);
769 nv_icmd(priv, 0x000851, 0x00000000);
770 nv_icmd(priv, 0x000852, 0x00000000);
771 nv_icmd(priv, 0x000853, 0x00000000);
772 nv_icmd(priv, 0x000854, 0x00000000);
773 nv_icmd(priv, 0x000855, 0x00000000);
774 nv_icmd(priv, 0x000856, 0x00000000);
775 nv_icmd(priv, 0x000857, 0x00000000);
776 nv_icmd(priv, 0x000738, 0x00000000);
777 nv_icmd(priv, 0x0006aa, 0x00000001);
778 nv_icmd(priv, 0x0006ab, 0x00000002);
779 nv_icmd(priv, 0x0006ac, 0x00000080);
780 nv_icmd(priv, 0x0006ad, 0x00000100);
781 nv_icmd(priv, 0x0006ae, 0x00000100);
782 nv_icmd(priv, 0x0006b1, 0x00000011);
783 nv_icmd(priv, 0x0006bb, 0x000000cf);
784 nv_icmd(priv, 0x0006ce, 0x2a712488);
785 nv_icmd(priv, 0x000739, 0x4085c000);
786 nv_icmd(priv, 0x00073a, 0x00000080);
787 nv_icmd(priv, 0x000786, 0x80000100);
788 nv_icmd(priv, 0x00073c, 0x00010100);
789 nv_icmd(priv, 0x00073d, 0x02800000);
790 nv_icmd(priv, 0x000787, 0x000000cf);
791 nv_icmd(priv, 0x00078c, 0x00000008);
792 nv_icmd(priv, 0x000792, 0x00000001);
793 nv_icmd(priv, 0x000794, 0x00000001);
794 nv_icmd(priv, 0x000795, 0x00000001);
795 nv_icmd(priv, 0x000796, 0x00000001);
796 nv_icmd(priv, 0x000797, 0x000000cf);
797 nv_icmd(priv, 0x000836, 0x00000001);
798 nv_icmd(priv, 0x00079a, 0x00000002);
799 nv_icmd(priv, 0x000833, 0x04444480);
800 nv_icmd(priv, 0x0007a1, 0x00000001);
801 nv_icmd(priv, 0x0007a3, 0x00000001);
802 nv_icmd(priv, 0x0007a4, 0x00000001);
803 nv_icmd(priv, 0x0007a5, 0x00000001);
804 nv_icmd(priv, 0x000831, 0x00000004);
805 nv_icmd(priv, 0x000b07, 0x00000002);
806 nv_icmd(priv, 0x000b08, 0x00000100);
807 nv_icmd(priv, 0x000b09, 0x00000100);
808 nv_icmd(priv, 0x000b0a, 0x00000001);
809 nv_icmd(priv, 0x000a04, 0x000000ff);
810 nv_icmd(priv, 0x000a0b, 0x00000040);
811 nv_icmd(priv, 0x00097f, 0x00000100);
812 nv_icmd(priv, 0x000a02, 0x00000001);
813 nv_icmd(priv, 0x000809, 0x00000007);
814 nv_icmd(priv, 0x00c221, 0x00000040);
815 nv_icmd(priv, 0x00c1b0, 0x0000000f);
816 nv_icmd(priv, 0x00c1b1, 0x0000000f);
817 nv_icmd(priv, 0x00c1b2, 0x0000000f);
818 nv_icmd(priv, 0x00c1b3, 0x0000000f);
819 nv_icmd(priv, 0x00c1b4, 0x0000000f);
820 nv_icmd(priv, 0x00c1b5, 0x0000000f);
821 nv_icmd(priv, 0x00c1b6, 0x0000000f);
822 nv_icmd(priv, 0x00c1b7, 0x0000000f);
823 nv_icmd(priv, 0x00c1b8, 0x0fac6881);
824 nv_icmd(priv, 0x00c1b9, 0x00fac688);
825 nv_icmd(priv, 0x00c401, 0x00000001);
826 nv_icmd(priv, 0x00c402, 0x00010001);
827 nv_icmd(priv, 0x00c403, 0x00000001);
828 nv_icmd(priv, 0x00c404, 0x00000001);
829 nv_icmd(priv, 0x00c40e, 0x00000020);
830 nv_icmd(priv, 0x00c500, 0x00000003);
831 nv_icmd(priv, 0x01e100, 0x00000001);
832 nv_icmd(priv, 0x001000, 0x00000002);
833 nv_icmd(priv, 0x0006aa, 0x00000001);
834 nv_icmd(priv, 0x0006ad, 0x00000100);
835 nv_icmd(priv, 0x0006ae, 0x00000100);
836 nv_icmd(priv, 0x0006b1, 0x00000011);
837 nv_icmd(priv, 0x00078c, 0x00000008);
838 nv_icmd(priv, 0x000792, 0x00000001);
839 nv_icmd(priv, 0x000794, 0x00000001);
840 nv_icmd(priv, 0x000795, 0x00000001);
841 nv_icmd(priv, 0x000796, 0x00000001);
842 nv_icmd(priv, 0x000797, 0x000000cf);
843 nv_icmd(priv, 0x00079a, 0x00000002);
844 nv_icmd(priv, 0x000833, 0x04444480);
845 nv_icmd(priv, 0x0007a1, 0x00000001);
846 nv_icmd(priv, 0x0007a3, 0x00000001);
847 nv_icmd(priv, 0x0007a4, 0x00000001);
848 nv_icmd(priv, 0x0007a5, 0x00000001);
849 nv_icmd(priv, 0x000831, 0x00000004);
850 nv_icmd(priv, 0x01e100, 0x00000001);
851 nv_icmd(priv, 0x001000, 0x00000008);
852 nv_icmd(priv, 0x000039, 0x00000000);
853 nv_icmd(priv, 0x00003a, 0x00000000);
854 nv_icmd(priv, 0x00003b, 0x00000000);
855 nv_icmd(priv, 0x000380, 0x00000001);
856 nv_icmd(priv, 0x000366, 0x00000000);
857 nv_icmd(priv, 0x000367, 0x00000000);
858 nv_icmd(priv, 0x000368, 0x00000fff);
859 nv_icmd(priv, 0x000370, 0x00000000);
860 nv_icmd(priv, 0x000371, 0x00000000);
861 nv_icmd(priv, 0x000372, 0x000fffff);
862 nv_icmd(priv, 0x000813, 0x00000006);
863 nv_icmd(priv, 0x000814, 0x00000008);
864 nv_icmd(priv, 0x000957, 0x00000003);
865 nv_icmd(priv, 0x000818, 0x00000000);
866 nv_icmd(priv, 0x000819, 0x00000000);
867 nv_icmd(priv, 0x00081a, 0x00000000);
868 nv_icmd(priv, 0x00081b, 0x00000000);
869 nv_icmd(priv, 0x00081c, 0x00000000);
870 nv_icmd(priv, 0x00081d, 0x00000000);
871 nv_icmd(priv, 0x00081e, 0x00000000);
872 nv_icmd(priv, 0x00081f, 0x00000000);
873 nv_icmd(priv, 0x000848, 0x00000000);
874 nv_icmd(priv, 0x000849, 0x00000000);
875 nv_icmd(priv, 0x00084a, 0x00000000);
876 nv_icmd(priv, 0x00084b, 0x00000000);
877 nv_icmd(priv, 0x00084c, 0x00000000);
878 nv_icmd(priv, 0x00084d, 0x00000000);
879 nv_icmd(priv, 0x00084e, 0x00000000);
880 nv_icmd(priv, 0x00084f, 0x00000000);
881 nv_icmd(priv, 0x000850, 0x00000000);
882 nv_icmd(priv, 0x000851, 0x00000000);
883 nv_icmd(priv, 0x000852, 0x00000000);
884 nv_icmd(priv, 0x000853, 0x00000000);
885 nv_icmd(priv, 0x000854, 0x00000000);
886 nv_icmd(priv, 0x000855, 0x00000000);
887 nv_icmd(priv, 0x000856, 0x00000000);
888 nv_icmd(priv, 0x000857, 0x00000000);
889 nv_icmd(priv, 0x000738, 0x00000000);
890 nv_icmd(priv, 0x000b07, 0x00000002);
891 nv_icmd(priv, 0x000b08, 0x00000100);
892 nv_icmd(priv, 0x000b09, 0x00000100);
893 nv_icmd(priv, 0x000b0a, 0x00000001);
894 nv_icmd(priv, 0x000a04, 0x000000ff);
895 nv_icmd(priv, 0x00097f, 0x00000100);
896 nv_icmd(priv, 0x000a02, 0x00000001);
897 nv_icmd(priv, 0x000809, 0x00000007);
898 nv_icmd(priv, 0x00c221, 0x00000040);
899 nv_icmd(priv, 0x00c401, 0x00000001);
900 nv_icmd(priv, 0x00c402, 0x00010001);
901 nv_icmd(priv, 0x00c403, 0x00000001);
902 nv_icmd(priv, 0x00c404, 0x00000001);
903 nv_icmd(priv, 0x00c40e, 0x00000020);
904 nv_icmd(priv, 0x00c500, 0x00000003);
905 nv_icmd(priv, 0x01e100, 0x00000001);
906 nv_icmd(priv, 0x001000, 0x00000001);
907 nv_icmd(priv, 0x000b07, 0x00000002);
908 nv_icmd(priv, 0x000b08, 0x00000100);
909 nv_icmd(priv, 0x000b09, 0x00000100);
910 nv_icmd(priv, 0x000b0a, 0x00000001);
911 nv_icmd(priv, 0x01e100, 0x00000001);
912 nv_wr32(priv, 0x400208, 0x00000000);
913}
914
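nve0_grctx_generate_icmd() above shows the bracket explicitly: 0x80000000 (bit 31) is written to register 0x400208 before the default stream and cleared again once the last nv_icmd() has been issued. A hedged sketch of that enable/stream/disable shape follows; icmd_bracketed(), emit_defaults() and nv_wr32_stub() are assumed names for illustration, not nouveau helpers.

/* Sketch: the enable/stream/disable bracket around an icmd run.
 * All names here are illustrative stubs, not nouveau's real helpers. */
#include <stdint.h>
#include <stdio.h>

static void nv_wr32_stub(void *priv, uint32_t reg, uint32_t val)
{
	(void)priv;
	printf("wr32 0x%06x <- 0x%08x\n", reg, val);
}

/* Stand-in for the long nv_icmd() run in the function above. */
static void emit_defaults(void *priv)
{
	(void)priv;
}

static void icmd_bracketed(void *priv)
{
	nv_wr32_stub(priv, 0x400208, 0x80000000);	/* assume: opens icmd submission (bit 31) */
	emit_defaults(priv);				/* stream the captured defaults */
	nv_wr32_stub(priv, 0x400208, 0x00000000);	/* restore the register afterwards */
}

int main(void)
{
	icmd_bracketed(NULL);
	return 0;
}
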
915static void
916nve0_grctx_generate_a097(struct nvc0_graph_priv *priv)
917{
918 nv_mthd(priv, 0xa097, 0x0800, 0x00000000);
919 nv_mthd(priv, 0xa097, 0x0840, 0x00000000);
920 nv_mthd(priv, 0xa097, 0x0880, 0x00000000);
921 nv_mthd(priv, 0xa097, 0x08c0, 0x00000000);
922 nv_mthd(priv, 0xa097, 0x0900, 0x00000000);
923 nv_mthd(priv, 0xa097, 0x0940, 0x00000000);
924 nv_mthd(priv, 0xa097, 0x0980, 0x00000000);
925 nv_mthd(priv, 0xa097, 0x09c0, 0x00000000);
926 nv_mthd(priv, 0xa097, 0x0804, 0x00000000);
927 nv_mthd(priv, 0xa097, 0x0844, 0x00000000);
928 nv_mthd(priv, 0xa097, 0x0884, 0x00000000);
929 nv_mthd(priv, 0xa097, 0x08c4, 0x00000000);
930 nv_mthd(priv, 0xa097, 0x0904, 0x00000000);
931 nv_mthd(priv, 0xa097, 0x0944, 0x00000000);
932 nv_mthd(priv, 0xa097, 0x0984, 0x00000000);
933 nv_mthd(priv, 0xa097, 0x09c4, 0x00000000);
934 nv_mthd(priv, 0xa097, 0x0808, 0x00000400);
935 nv_mthd(priv, 0xa097, 0x0848, 0x00000400);
936 nv_mthd(priv, 0xa097, 0x0888, 0x00000400);
937 nv_mthd(priv, 0xa097, 0x08c8, 0x00000400);
938 nv_mthd(priv, 0xa097, 0x0908, 0x00000400);
939 nv_mthd(priv, 0xa097, 0x0948, 0x00000400);
940 nv_mthd(priv, 0xa097, 0x0988, 0x00000400);
941 nv_mthd(priv, 0xa097, 0x09c8, 0x00000400);
942 nv_mthd(priv, 0xa097, 0x080c, 0x00000300);
943 nv_mthd(priv, 0xa097, 0x084c, 0x00000300);
944 nv_mthd(priv, 0xa097, 0x088c, 0x00000300);
945 nv_mthd(priv, 0xa097, 0x08cc, 0x00000300);
946 nv_mthd(priv, 0xa097, 0x090c, 0x00000300);
947 nv_mthd(priv, 0xa097, 0x094c, 0x00000300);
948 nv_mthd(priv, 0xa097, 0x098c, 0x00000300);
949 nv_mthd(priv, 0xa097, 0x09cc, 0x00000300);
950 nv_mthd(priv, 0xa097, 0x0810, 0x000000cf);
951 nv_mthd(priv, 0xa097, 0x0850, 0x00000000);
952 nv_mthd(priv, 0xa097, 0x0890, 0x00000000);
953 nv_mthd(priv, 0xa097, 0x08d0, 0x00000000);
954 nv_mthd(priv, 0xa097, 0x0910, 0x00000000);
955 nv_mthd(priv, 0xa097, 0x0950, 0x00000000);
956 nv_mthd(priv, 0xa097, 0x0990, 0x00000000);
957 nv_mthd(priv, 0xa097, 0x09d0, 0x00000000);
958 nv_mthd(priv, 0xa097, 0x0814, 0x00000040);
959 nv_mthd(priv, 0xa097, 0x0854, 0x00000040);
960 nv_mthd(priv, 0xa097, 0x0894, 0x00000040);
961 nv_mthd(priv, 0xa097, 0x08d4, 0x00000040);
962 nv_mthd(priv, 0xa097, 0x0914, 0x00000040);
963 nv_mthd(priv, 0xa097, 0x0954, 0x00000040);
964 nv_mthd(priv, 0xa097, 0x0994, 0x00000040);
965 nv_mthd(priv, 0xa097, 0x09d4, 0x00000040);
966 nv_mthd(priv, 0xa097, 0x0818, 0x00000001);
967 nv_mthd(priv, 0xa097, 0x0858, 0x00000001);
968 nv_mthd(priv, 0xa097, 0x0898, 0x00000001);
969 nv_mthd(priv, 0xa097, 0x08d8, 0x00000001);
970 nv_mthd(priv, 0xa097, 0x0918, 0x00000001);
971 nv_mthd(priv, 0xa097, 0x0958, 0x00000001);
972 nv_mthd(priv, 0xa097, 0x0998, 0x00000001);
973 nv_mthd(priv, 0xa097, 0x09d8, 0x00000001);
974 nv_mthd(priv, 0xa097, 0x081c, 0x00000000);
975 nv_mthd(priv, 0xa097, 0x085c, 0x00000000);
976 nv_mthd(priv, 0xa097, 0x089c, 0x00000000);
977 nv_mthd(priv, 0xa097, 0x08dc, 0x00000000);
978 nv_mthd(priv, 0xa097, 0x091c, 0x00000000);
979 nv_mthd(priv, 0xa097, 0x095c, 0x00000000);
980 nv_mthd(priv, 0xa097, 0x099c, 0x00000000);
981 nv_mthd(priv, 0xa097, 0x09dc, 0x00000000);
982 nv_mthd(priv, 0xa097, 0x0820, 0x00000000);
983 nv_mthd(priv, 0xa097, 0x0860, 0x00000000);
984 nv_mthd(priv, 0xa097, 0x08a0, 0x00000000);
985 nv_mthd(priv, 0xa097, 0x08e0, 0x00000000);
986 nv_mthd(priv, 0xa097, 0x0920, 0x00000000);
987 nv_mthd(priv, 0xa097, 0x0960, 0x00000000);
988 nv_mthd(priv, 0xa097, 0x09a0, 0x00000000);
989 nv_mthd(priv, 0xa097, 0x09e0, 0x00000000);
990 nv_mthd(priv, 0xa097, 0x1c00, 0x00000000);
991 nv_mthd(priv, 0xa097, 0x1c10, 0x00000000);
992 nv_mthd(priv, 0xa097, 0x1c20, 0x00000000);
993 nv_mthd(priv, 0xa097, 0x1c30, 0x00000000);
994 nv_mthd(priv, 0xa097, 0x1c40, 0x00000000);
995 nv_mthd(priv, 0xa097, 0x1c50, 0x00000000);
996 nv_mthd(priv, 0xa097, 0x1c60, 0x00000000);
997 nv_mthd(priv, 0xa097, 0x1c70, 0x00000000);
998 nv_mthd(priv, 0xa097, 0x1c80, 0x00000000);
999 nv_mthd(priv, 0xa097, 0x1c90, 0x00000000);
1000 nv_mthd(priv, 0xa097, 0x1ca0, 0x00000000);
1001 nv_mthd(priv, 0xa097, 0x1cb0, 0x00000000);
1002 nv_mthd(priv, 0xa097, 0x1cc0, 0x00000000);
1003 nv_mthd(priv, 0xa097, 0x1cd0, 0x00000000);
1004 nv_mthd(priv, 0xa097, 0x1ce0, 0x00000000);
1005 nv_mthd(priv, 0xa097, 0x1cf0, 0x00000000);
1006 nv_mthd(priv, 0xa097, 0x1c04, 0x00000000);
1007 nv_mthd(priv, 0xa097, 0x1c14, 0x00000000);
1008 nv_mthd(priv, 0xa097, 0x1c24, 0x00000000);
1009 nv_mthd(priv, 0xa097, 0x1c34, 0x00000000);
1010 nv_mthd(priv, 0xa097, 0x1c44, 0x00000000);
1011 nv_mthd(priv, 0xa097, 0x1c54, 0x00000000);
1012 nv_mthd(priv, 0xa097, 0x1c64, 0x00000000);
1013 nv_mthd(priv, 0xa097, 0x1c74, 0x00000000);
1014 nv_mthd(priv, 0xa097, 0x1c84, 0x00000000);
1015 nv_mthd(priv, 0xa097, 0x1c94, 0x00000000);
1016 nv_mthd(priv, 0xa097, 0x1ca4, 0x00000000);
1017 nv_mthd(priv, 0xa097, 0x1cb4, 0x00000000);
1018 nv_mthd(priv, 0xa097, 0x1cc4, 0x00000000);
1019 nv_mthd(priv, 0xa097, 0x1cd4, 0x00000000);
1020 nv_mthd(priv, 0xa097, 0x1ce4, 0x00000000);
1021 nv_mthd(priv, 0xa097, 0x1cf4, 0x00000000);
1022 nv_mthd(priv, 0xa097, 0x1c08, 0x00000000);
1023 nv_mthd(priv, 0xa097, 0x1c18, 0x00000000);
1024 nv_mthd(priv, 0xa097, 0x1c28, 0x00000000);
1025 nv_mthd(priv, 0xa097, 0x1c38, 0x00000000);
1026 nv_mthd(priv, 0xa097, 0x1c48, 0x00000000);
1027 nv_mthd(priv, 0xa097, 0x1c58, 0x00000000);
1028 nv_mthd(priv, 0xa097, 0x1c68, 0x00000000);
1029 nv_mthd(priv, 0xa097, 0x1c78, 0x00000000);
1030 nv_mthd(priv, 0xa097, 0x1c88, 0x00000000);
1031 nv_mthd(priv, 0xa097, 0x1c98, 0x00000000);
1032 nv_mthd(priv, 0xa097, 0x1ca8, 0x00000000);
1033 nv_mthd(priv, 0xa097, 0x1cb8, 0x00000000);
1034 nv_mthd(priv, 0xa097, 0x1cc8, 0x00000000);
1035 nv_mthd(priv, 0xa097, 0x1cd8, 0x00000000);
1036 nv_mthd(priv, 0xa097, 0x1ce8, 0x00000000);
1037 nv_mthd(priv, 0xa097, 0x1cf8, 0x00000000);
1038 nv_mthd(priv, 0xa097, 0x1c0c, 0x00000000);
1039 nv_mthd(priv, 0xa097, 0x1c1c, 0x00000000);
1040 nv_mthd(priv, 0xa097, 0x1c2c, 0x00000000);
1041 nv_mthd(priv, 0xa097, 0x1c3c, 0x00000000);
1042 nv_mthd(priv, 0xa097, 0x1c4c, 0x00000000);
1043 nv_mthd(priv, 0xa097, 0x1c5c, 0x00000000);
1044 nv_mthd(priv, 0xa097, 0x1c6c, 0x00000000);
1045 nv_mthd(priv, 0xa097, 0x1c7c, 0x00000000);
1046 nv_mthd(priv, 0xa097, 0x1c8c, 0x00000000);
1047 nv_mthd(priv, 0xa097, 0x1c9c, 0x00000000);
1048 nv_mthd(priv, 0xa097, 0x1cac, 0x00000000);
1049 nv_mthd(priv, 0xa097, 0x1cbc, 0x00000000);
1050 nv_mthd(priv, 0xa097, 0x1ccc, 0x00000000);
1051 nv_mthd(priv, 0xa097, 0x1cdc, 0x00000000);
1052 nv_mthd(priv, 0xa097, 0x1cec, 0x00000000);
1053 nv_mthd(priv, 0xa097, 0x1cfc, 0x00000000);
1054 nv_mthd(priv, 0xa097, 0x1d00, 0x00000000);
1055 nv_mthd(priv, 0xa097, 0x1d10, 0x00000000);
1056 nv_mthd(priv, 0xa097, 0x1d20, 0x00000000);
1057 nv_mthd(priv, 0xa097, 0x1d30, 0x00000000);
1058 nv_mthd(priv, 0xa097, 0x1d40, 0x00000000);
1059 nv_mthd(priv, 0xa097, 0x1d50, 0x00000000);
1060 nv_mthd(priv, 0xa097, 0x1d60, 0x00000000);
1061 nv_mthd(priv, 0xa097, 0x1d70, 0x00000000);
1062 nv_mthd(priv, 0xa097, 0x1d80, 0x00000000);
1063 nv_mthd(priv, 0xa097, 0x1d90, 0x00000000);
1064 nv_mthd(priv, 0xa097, 0x1da0, 0x00000000);
1065 nv_mthd(priv, 0xa097, 0x1db0, 0x00000000);
1066 nv_mthd(priv, 0xa097, 0x1dc0, 0x00000000);
1067 nv_mthd(priv, 0xa097, 0x1dd0, 0x00000000);
1068 nv_mthd(priv, 0xa097, 0x1de0, 0x00000000);
1069 nv_mthd(priv, 0xa097, 0x1df0, 0x00000000);
1070 nv_mthd(priv, 0xa097, 0x1d04, 0x00000000);
1071 nv_mthd(priv, 0xa097, 0x1d14, 0x00000000);
1072 nv_mthd(priv, 0xa097, 0x1d24, 0x00000000);
1073 nv_mthd(priv, 0xa097, 0x1d34, 0x00000000);
1074 nv_mthd(priv, 0xa097, 0x1d44, 0x00000000);
1075 nv_mthd(priv, 0xa097, 0x1d54, 0x00000000);
1076 nv_mthd(priv, 0xa097, 0x1d64, 0x00000000);
1077 nv_mthd(priv, 0xa097, 0x1d74, 0x00000000);
1078 nv_mthd(priv, 0xa097, 0x1d84, 0x00000000);
1079 nv_mthd(priv, 0xa097, 0x1d94, 0x00000000);
1080 nv_mthd(priv, 0xa097, 0x1da4, 0x00000000);
1081 nv_mthd(priv, 0xa097, 0x1db4, 0x00000000);
1082 nv_mthd(priv, 0xa097, 0x1dc4, 0x00000000);
1083 nv_mthd(priv, 0xa097, 0x1dd4, 0x00000000);
1084 nv_mthd(priv, 0xa097, 0x1de4, 0x00000000);
1085 nv_mthd(priv, 0xa097, 0x1df4, 0x00000000);
1086 nv_mthd(priv, 0xa097, 0x1d08, 0x00000000);
1087 nv_mthd(priv, 0xa097, 0x1d18, 0x00000000);
1088 nv_mthd(priv, 0xa097, 0x1d28, 0x00000000);
1089 nv_mthd(priv, 0xa097, 0x1d38, 0x00000000);
1090 nv_mthd(priv, 0xa097, 0x1d48, 0x00000000);
1091 nv_mthd(priv, 0xa097, 0x1d58, 0x00000000);
1092 nv_mthd(priv, 0xa097, 0x1d68, 0x00000000);
1093 nv_mthd(priv, 0xa097, 0x1d78, 0x00000000);
1094 nv_mthd(priv, 0xa097, 0x1d88, 0x00000000);
1095 nv_mthd(priv, 0xa097, 0x1d98, 0x00000000);
1096 nv_mthd(priv, 0xa097, 0x1da8, 0x00000000);
1097 nv_mthd(priv, 0xa097, 0x1db8, 0x00000000);
1098 nv_mthd(priv, 0xa097, 0x1dc8, 0x00000000);
1099 nv_mthd(priv, 0xa097, 0x1dd8, 0x00000000);
1100 nv_mthd(priv, 0xa097, 0x1de8, 0x00000000);
1101 nv_mthd(priv, 0xa097, 0x1df8, 0x00000000);
1102 nv_mthd(priv, 0xa097, 0x1d0c, 0x00000000);
1103 nv_mthd(priv, 0xa097, 0x1d1c, 0x00000000);
1104 nv_mthd(priv, 0xa097, 0x1d2c, 0x00000000);
1105 nv_mthd(priv, 0xa097, 0x1d3c, 0x00000000);
1106 nv_mthd(priv, 0xa097, 0x1d4c, 0x00000000);
1107 nv_mthd(priv, 0xa097, 0x1d5c, 0x00000000);
1108 nv_mthd(priv, 0xa097, 0x1d6c, 0x00000000);
1109 nv_mthd(priv, 0xa097, 0x1d7c, 0x00000000);
1110 nv_mthd(priv, 0xa097, 0x1d8c, 0x00000000);
1111 nv_mthd(priv, 0xa097, 0x1d9c, 0x00000000);
1112 nv_mthd(priv, 0xa097, 0x1dac, 0x00000000);
1113 nv_mthd(priv, 0xa097, 0x1dbc, 0x00000000);
1114 nv_mthd(priv, 0xa097, 0x1dcc, 0x00000000);
1115 nv_mthd(priv, 0xa097, 0x1ddc, 0x00000000);
1116 nv_mthd(priv, 0xa097, 0x1dec, 0x00000000);
1117 nv_mthd(priv, 0xa097, 0x1dfc, 0x00000000);
1118 nv_mthd(priv, 0xa097, 0x1f00, 0x00000000);
1119 nv_mthd(priv, 0xa097, 0x1f08, 0x00000000);
1120 nv_mthd(priv, 0xa097, 0x1f10, 0x00000000);
1121 nv_mthd(priv, 0xa097, 0x1f18, 0x00000000);
1122 nv_mthd(priv, 0xa097, 0x1f20, 0x00000000);
1123 nv_mthd(priv, 0xa097, 0x1f28, 0x00000000);
1124 nv_mthd(priv, 0xa097, 0x1f30, 0x00000000);
1125 nv_mthd(priv, 0xa097, 0x1f38, 0x00000000);
1126 nv_mthd(priv, 0xa097, 0x1f40, 0x00000000);
1127 nv_mthd(priv, 0xa097, 0x1f48, 0x00000000);
1128 nv_mthd(priv, 0xa097, 0x1f50, 0x00000000);
1129 nv_mthd(priv, 0xa097, 0x1f58, 0x00000000);
1130 nv_mthd(priv, 0xa097, 0x1f60, 0x00000000);
1131 nv_mthd(priv, 0xa097, 0x1f68, 0x00000000);
1132 nv_mthd(priv, 0xa097, 0x1f70, 0x00000000);
1133 nv_mthd(priv, 0xa097, 0x1f78, 0x00000000);
1134 nv_mthd(priv, 0xa097, 0x1f04, 0x00000000);
1135 nv_mthd(priv, 0xa097, 0x1f0c, 0x00000000);
1136 nv_mthd(priv, 0xa097, 0x1f14, 0x00000000);
1137 nv_mthd(priv, 0xa097, 0x1f1c, 0x00000000);
1138 nv_mthd(priv, 0xa097, 0x1f24, 0x00000000);
1139 nv_mthd(priv, 0xa097, 0x1f2c, 0x00000000);
1140 nv_mthd(priv, 0xa097, 0x1f34, 0x00000000);
1141 nv_mthd(priv, 0xa097, 0x1f3c, 0x00000000);
1142 nv_mthd(priv, 0xa097, 0x1f44, 0x00000000);
1143 nv_mthd(priv, 0xa097, 0x1f4c, 0x00000000);
1144 nv_mthd(priv, 0xa097, 0x1f54, 0x00000000);
1145 nv_mthd(priv, 0xa097, 0x1f5c, 0x00000000);
1146 nv_mthd(priv, 0xa097, 0x1f64, 0x00000000);
1147 nv_mthd(priv, 0xa097, 0x1f6c, 0x00000000);
1148 nv_mthd(priv, 0xa097, 0x1f74, 0x00000000);
1149 nv_mthd(priv, 0xa097, 0x1f7c, 0x00000000);
1150 nv_mthd(priv, 0xa097, 0x1f80, 0x00000000);
1151 nv_mthd(priv, 0xa097, 0x1f88, 0x00000000);
1152 nv_mthd(priv, 0xa097, 0x1f90, 0x00000000);
1153 nv_mthd(priv, 0xa097, 0x1f98, 0x00000000);
1154 nv_mthd(priv, 0xa097, 0x1fa0, 0x00000000);
1155 nv_mthd(priv, 0xa097, 0x1fa8, 0x00000000);
1156 nv_mthd(priv, 0xa097, 0x1fb0, 0x00000000);
1157 nv_mthd(priv, 0xa097, 0x1fb8, 0x00000000);
1158 nv_mthd(priv, 0xa097, 0x1fc0, 0x00000000);
1159 nv_mthd(priv, 0xa097, 0x1fc8, 0x00000000);
1160 nv_mthd(priv, 0xa097, 0x1fd0, 0x00000000);
1161 nv_mthd(priv, 0xa097, 0x1fd8, 0x00000000);
1162 nv_mthd(priv, 0xa097, 0x1fe0, 0x00000000);
1163 nv_mthd(priv, 0xa097, 0x1fe8, 0x00000000);
1164 nv_mthd(priv, 0xa097, 0x1ff0, 0x00000000);
1165 nv_mthd(priv, 0xa097, 0x1ff8, 0x00000000);
1166 nv_mthd(priv, 0xa097, 0x1f84, 0x00000000);
1167 nv_mthd(priv, 0xa097, 0x1f8c, 0x00000000);
1168 nv_mthd(priv, 0xa097, 0x1f94, 0x00000000);
1169 nv_mthd(priv, 0xa097, 0x1f9c, 0x00000000);
1170 nv_mthd(priv, 0xa097, 0x1fa4, 0x00000000);
1171 nv_mthd(priv, 0xa097, 0x1fac, 0x00000000);
1172 nv_mthd(priv, 0xa097, 0x1fb4, 0x00000000);
1173 nv_mthd(priv, 0xa097, 0x1fbc, 0x00000000);
1174 nv_mthd(priv, 0xa097, 0x1fc4, 0x00000000);
1175 nv_mthd(priv, 0xa097, 0x1fcc, 0x00000000);
1176 nv_mthd(priv, 0xa097, 0x1fd4, 0x00000000);
1177 nv_mthd(priv, 0xa097, 0x1fdc, 0x00000000);
1178 nv_mthd(priv, 0xa097, 0x1fe4, 0x00000000);
1179 nv_mthd(priv, 0xa097, 0x1fec, 0x00000000);
1180 nv_mthd(priv, 0xa097, 0x1ff4, 0x00000000);
1181 nv_mthd(priv, 0xa097, 0x1ffc, 0x00000000);
1182 nv_mthd(priv, 0xa097, 0x2000, 0x00000000);
1183 nv_mthd(priv, 0xa097, 0x2040, 0x00000011);
1184 nv_mthd(priv, 0xa097, 0x2080, 0x00000020);
1185 nv_mthd(priv, 0xa097, 0x20c0, 0x00000030);
1186 nv_mthd(priv, 0xa097, 0x2100, 0x00000040);
1187 nv_mthd(priv, 0xa097, 0x2140, 0x00000051);
1188 nv_mthd(priv, 0xa097, 0x200c, 0x00000001);
1189 nv_mthd(priv, 0xa097, 0x204c, 0x00000001);
1190 nv_mthd(priv, 0xa097, 0x208c, 0x00000001);
1191 nv_mthd(priv, 0xa097, 0x20cc, 0x00000001);
1192 nv_mthd(priv, 0xa097, 0x210c, 0x00000001);
1193 nv_mthd(priv, 0xa097, 0x214c, 0x00000001);
1194 nv_mthd(priv, 0xa097, 0x2010, 0x00000000);
1195 nv_mthd(priv, 0xa097, 0x2050, 0x00000000);
1196 nv_mthd(priv, 0xa097, 0x2090, 0x00000001);
1197 nv_mthd(priv, 0xa097, 0x20d0, 0x00000002);
1198 nv_mthd(priv, 0xa097, 0x2110, 0x00000003);
1199 nv_mthd(priv, 0xa097, 0x2150, 0x00000004);
1200 nv_mthd(priv, 0xa097, 0x0380, 0x00000000);
1201 nv_mthd(priv, 0xa097, 0x03a0, 0x00000000);
1202 nv_mthd(priv, 0xa097, 0x03c0, 0x00000000);
1203 nv_mthd(priv, 0xa097, 0x03e0, 0x00000000);
1204 nv_mthd(priv, 0xa097, 0x0384, 0x00000000);
1205 nv_mthd(priv, 0xa097, 0x03a4, 0x00000000);
1206 nv_mthd(priv, 0xa097, 0x03c4, 0x00000000);
1207 nv_mthd(priv, 0xa097, 0x03e4, 0x00000000);
1208 nv_mthd(priv, 0xa097, 0x0388, 0x00000000);
1209 nv_mthd(priv, 0xa097, 0x03a8, 0x00000000);
1210 nv_mthd(priv, 0xa097, 0x03c8, 0x00000000);
1211 nv_mthd(priv, 0xa097, 0x03e8, 0x00000000);
1212 nv_mthd(priv, 0xa097, 0x038c, 0x00000000);
1213 nv_mthd(priv, 0xa097, 0x03ac, 0x00000000);
1214 nv_mthd(priv, 0xa097, 0x03cc, 0x00000000);
1215 nv_mthd(priv, 0xa097, 0x03ec, 0x00000000);
1216 nv_mthd(priv, 0xa097, 0x0700, 0x00000000);
1217 nv_mthd(priv, 0xa097, 0x0710, 0x00000000);
1218 nv_mthd(priv, 0xa097, 0x0720, 0x00000000);
1219 nv_mthd(priv, 0xa097, 0x0730, 0x00000000);
1220 nv_mthd(priv, 0xa097, 0x0704, 0x00000000);
1221 nv_mthd(priv, 0xa097, 0x0714, 0x00000000);
1222 nv_mthd(priv, 0xa097, 0x0724, 0x00000000);
1223 nv_mthd(priv, 0xa097, 0x0734, 0x00000000);
1224 nv_mthd(priv, 0xa097, 0x0708, 0x00000000);
1225 nv_mthd(priv, 0xa097, 0x0718, 0x00000000);
1226 nv_mthd(priv, 0xa097, 0x0728, 0x00000000);
1227 nv_mthd(priv, 0xa097, 0x0738, 0x00000000);
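	/* 0x2800-0x29fc: 128 consecutive words, all cleared to zero. */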
1228 nv_mthd(priv, 0xa097, 0x2800, 0x00000000);
1229 nv_mthd(priv, 0xa097, 0x2804, 0x00000000);
1230 nv_mthd(priv, 0xa097, 0x2808, 0x00000000);
1231 nv_mthd(priv, 0xa097, 0x280c, 0x00000000);
1232 nv_mthd(priv, 0xa097, 0x2810, 0x00000000);
1233 nv_mthd(priv, 0xa097, 0x2814, 0x00000000);
1234 nv_mthd(priv, 0xa097, 0x2818, 0x00000000);
1235 nv_mthd(priv, 0xa097, 0x281c, 0x00000000);
1236 nv_mthd(priv, 0xa097, 0x2820, 0x00000000);
1237 nv_mthd(priv, 0xa097, 0x2824, 0x00000000);
1238 nv_mthd(priv, 0xa097, 0x2828, 0x00000000);
1239 nv_mthd(priv, 0xa097, 0x282c, 0x00000000);
1240 nv_mthd(priv, 0xa097, 0x2830, 0x00000000);
1241 nv_mthd(priv, 0xa097, 0x2834, 0x00000000);
1242 nv_mthd(priv, 0xa097, 0x2838, 0x00000000);
1243 nv_mthd(priv, 0xa097, 0x283c, 0x00000000);
1244 nv_mthd(priv, 0xa097, 0x2840, 0x00000000);
1245 nv_mthd(priv, 0xa097, 0x2844, 0x00000000);
1246 nv_mthd(priv, 0xa097, 0x2848, 0x00000000);
1247 nv_mthd(priv, 0xa097, 0x284c, 0x00000000);
1248 nv_mthd(priv, 0xa097, 0x2850, 0x00000000);
1249 nv_mthd(priv, 0xa097, 0x2854, 0x00000000);
1250 nv_mthd(priv, 0xa097, 0x2858, 0x00000000);
1251 nv_mthd(priv, 0xa097, 0x285c, 0x00000000);
1252 nv_mthd(priv, 0xa097, 0x2860, 0x00000000);
1253 nv_mthd(priv, 0xa097, 0x2864, 0x00000000);
1254 nv_mthd(priv, 0xa097, 0x2868, 0x00000000);
1255 nv_mthd(priv, 0xa097, 0x286c, 0x00000000);
1256 nv_mthd(priv, 0xa097, 0x2870, 0x00000000);
1257 nv_mthd(priv, 0xa097, 0x2874, 0x00000000);
1258 nv_mthd(priv, 0xa097, 0x2878, 0x00000000);
1259 nv_mthd(priv, 0xa097, 0x287c, 0x00000000);
1260 nv_mthd(priv, 0xa097, 0x2880, 0x00000000);
1261 nv_mthd(priv, 0xa097, 0x2884, 0x00000000);
1262 nv_mthd(priv, 0xa097, 0x2888, 0x00000000);
1263 nv_mthd(priv, 0xa097, 0x288c, 0x00000000);
1264 nv_mthd(priv, 0xa097, 0x2890, 0x00000000);
1265 nv_mthd(priv, 0xa097, 0x2894, 0x00000000);
1266 nv_mthd(priv, 0xa097, 0x2898, 0x00000000);
1267 nv_mthd(priv, 0xa097, 0x289c, 0x00000000);
1268 nv_mthd(priv, 0xa097, 0x28a0, 0x00000000);
1269 nv_mthd(priv, 0xa097, 0x28a4, 0x00000000);
1270 nv_mthd(priv, 0xa097, 0x28a8, 0x00000000);
1271 nv_mthd(priv, 0xa097, 0x28ac, 0x00000000);
1272 nv_mthd(priv, 0xa097, 0x28b0, 0x00000000);
1273 nv_mthd(priv, 0xa097, 0x28b4, 0x00000000);
1274 nv_mthd(priv, 0xa097, 0x28b8, 0x00000000);
1275 nv_mthd(priv, 0xa097, 0x28bc, 0x00000000);
1276 nv_mthd(priv, 0xa097, 0x28c0, 0x00000000);
1277 nv_mthd(priv, 0xa097, 0x28c4, 0x00000000);
1278 nv_mthd(priv, 0xa097, 0x28c8, 0x00000000);
1279 nv_mthd(priv, 0xa097, 0x28cc, 0x00000000);
1280 nv_mthd(priv, 0xa097, 0x28d0, 0x00000000);
1281 nv_mthd(priv, 0xa097, 0x28d4, 0x00000000);
1282 nv_mthd(priv, 0xa097, 0x28d8, 0x00000000);
1283 nv_mthd(priv, 0xa097, 0x28dc, 0x00000000);
1284 nv_mthd(priv, 0xa097, 0x28e0, 0x00000000);
1285 nv_mthd(priv, 0xa097, 0x28e4, 0x00000000);
1286 nv_mthd(priv, 0xa097, 0x28e8, 0x00000000);
1287 nv_mthd(priv, 0xa097, 0x28ec, 0x00000000);
1288 nv_mthd(priv, 0xa097, 0x28f0, 0x00000000);
1289 nv_mthd(priv, 0xa097, 0x28f4, 0x00000000);
1290 nv_mthd(priv, 0xa097, 0x28f8, 0x00000000);
1291 nv_mthd(priv, 0xa097, 0x28fc, 0x00000000);
1292 nv_mthd(priv, 0xa097, 0x2900, 0x00000000);
1293 nv_mthd(priv, 0xa097, 0x2904, 0x00000000);
1294 nv_mthd(priv, 0xa097, 0x2908, 0x00000000);
1295 nv_mthd(priv, 0xa097, 0x290c, 0x00000000);
1296 nv_mthd(priv, 0xa097, 0x2910, 0x00000000);
1297 nv_mthd(priv, 0xa097, 0x2914, 0x00000000);
1298 nv_mthd(priv, 0xa097, 0x2918, 0x00000000);
1299 nv_mthd(priv, 0xa097, 0x291c, 0x00000000);
1300 nv_mthd(priv, 0xa097, 0x2920, 0x00000000);
1301 nv_mthd(priv, 0xa097, 0x2924, 0x00000000);
1302 nv_mthd(priv, 0xa097, 0x2928, 0x00000000);
1303 nv_mthd(priv, 0xa097, 0x292c, 0x00000000);
1304 nv_mthd(priv, 0xa097, 0x2930, 0x00000000);
1305 nv_mthd(priv, 0xa097, 0x2934, 0x00000000);
1306 nv_mthd(priv, 0xa097, 0x2938, 0x00000000);
1307 nv_mthd(priv, 0xa097, 0x293c, 0x00000000);
1308 nv_mthd(priv, 0xa097, 0x2940, 0x00000000);
1309 nv_mthd(priv, 0xa097, 0x2944, 0x00000000);
1310 nv_mthd(priv, 0xa097, 0x2948, 0x00000000);
1311 nv_mthd(priv, 0xa097, 0x294c, 0x00000000);
1312 nv_mthd(priv, 0xa097, 0x2950, 0x00000000);
1313 nv_mthd(priv, 0xa097, 0x2954, 0x00000000);
1314 nv_mthd(priv, 0xa097, 0x2958, 0x00000000);
1315 nv_mthd(priv, 0xa097, 0x295c, 0x00000000);
1316 nv_mthd(priv, 0xa097, 0x2960, 0x00000000);
1317 nv_mthd(priv, 0xa097, 0x2964, 0x00000000);
1318 nv_mthd(priv, 0xa097, 0x2968, 0x00000000);
1319 nv_mthd(priv, 0xa097, 0x296c, 0x00000000);
1320 nv_mthd(priv, 0xa097, 0x2970, 0x00000000);
1321 nv_mthd(priv, 0xa097, 0x2974, 0x00000000);
1322 nv_mthd(priv, 0xa097, 0x2978, 0x00000000);
1323 nv_mthd(priv, 0xa097, 0x297c, 0x00000000);
1324 nv_mthd(priv, 0xa097, 0x2980, 0x00000000);
1325 nv_mthd(priv, 0xa097, 0x2984, 0x00000000);
1326 nv_mthd(priv, 0xa097, 0x2988, 0x00000000);
1327 nv_mthd(priv, 0xa097, 0x298c, 0x00000000);
1328 nv_mthd(priv, 0xa097, 0x2990, 0x00000000);
1329 nv_mthd(priv, 0xa097, 0x2994, 0x00000000);
1330 nv_mthd(priv, 0xa097, 0x2998, 0x00000000);
1331 nv_mthd(priv, 0xa097, 0x299c, 0x00000000);
1332 nv_mthd(priv, 0xa097, 0x29a0, 0x00000000);
1333 nv_mthd(priv, 0xa097, 0x29a4, 0x00000000);
1334 nv_mthd(priv, 0xa097, 0x29a8, 0x00000000);
1335 nv_mthd(priv, 0xa097, 0x29ac, 0x00000000);
1336 nv_mthd(priv, 0xa097, 0x29b0, 0x00000000);
1337 nv_mthd(priv, 0xa097, 0x29b4, 0x00000000);
1338 nv_mthd(priv, 0xa097, 0x29b8, 0x00000000);
1339 nv_mthd(priv, 0xa097, 0x29bc, 0x00000000);
1340 nv_mthd(priv, 0xa097, 0x29c0, 0x00000000);
1341 nv_mthd(priv, 0xa097, 0x29c4, 0x00000000);
1342 nv_mthd(priv, 0xa097, 0x29c8, 0x00000000);
1343 nv_mthd(priv, 0xa097, 0x29cc, 0x00000000);
1344 nv_mthd(priv, 0xa097, 0x29d0, 0x00000000);
1345 nv_mthd(priv, 0xa097, 0x29d4, 0x00000000);
1346 nv_mthd(priv, 0xa097, 0x29d8, 0x00000000);
1347 nv_mthd(priv, 0xa097, 0x29dc, 0x00000000);
1348 nv_mthd(priv, 0xa097, 0x29e0, 0x00000000);
1349 nv_mthd(priv, 0xa097, 0x29e4, 0x00000000);
1350 nv_mthd(priv, 0xa097, 0x29e8, 0x00000000);
1351 nv_mthd(priv, 0xa097, 0x29ec, 0x00000000);
1352 nv_mthd(priv, 0xa097, 0x29f0, 0x00000000);
1353 nv_mthd(priv, 0xa097, 0x29f4, 0x00000000);
1354 nv_mthd(priv, 0xa097, 0x29f8, 0x00000000);
1355 nv_mthd(priv, 0xa097, 0x29fc, 0x00000000);
1356 nv_mthd(priv, 0xa097, 0x0a00, 0x00000000);
1357 nv_mthd(priv, 0xa097, 0x0a20, 0x00000000);
1358 nv_mthd(priv, 0xa097, 0x0a40, 0x00000000);
1359 nv_mthd(priv, 0xa097, 0x0a60, 0x00000000);
1360 nv_mthd(priv, 0xa097, 0x0a80, 0x00000000);
1361 nv_mthd(priv, 0xa097, 0x0aa0, 0x00000000);
1362 nv_mthd(priv, 0xa097, 0x0ac0, 0x00000000);
1363 nv_mthd(priv, 0xa097, 0x0ae0, 0x00000000);
1364 nv_mthd(priv, 0xa097, 0x0b00, 0x00000000);
1365 nv_mthd(priv, 0xa097, 0x0b20, 0x00000000);
1366 nv_mthd(priv, 0xa097, 0x0b40, 0x00000000);
1367 nv_mthd(priv, 0xa097, 0x0b60, 0x00000000);
1368 nv_mthd(priv, 0xa097, 0x0b80, 0x00000000);
1369 nv_mthd(priv, 0xa097, 0x0ba0, 0x00000000);
1370 nv_mthd(priv, 0xa097, 0x0bc0, 0x00000000);
1371 nv_mthd(priv, 0xa097, 0x0be0, 0x00000000);
1372 nv_mthd(priv, 0xa097, 0x0a04, 0x00000000);
1373 nv_mthd(priv, 0xa097, 0x0a24, 0x00000000);
1374 nv_mthd(priv, 0xa097, 0x0a44, 0x00000000);
1375 nv_mthd(priv, 0xa097, 0x0a64, 0x00000000);
1376 nv_mthd(priv, 0xa097, 0x0a84, 0x00000000);
1377 nv_mthd(priv, 0xa097, 0x0aa4, 0x00000000);
1378 nv_mthd(priv, 0xa097, 0x0ac4, 0x00000000);
1379 nv_mthd(priv, 0xa097, 0x0ae4, 0x00000000);
1380 nv_mthd(priv, 0xa097, 0x0b04, 0x00000000);
1381 nv_mthd(priv, 0xa097, 0x0b24, 0x00000000);
1382 nv_mthd(priv, 0xa097, 0x0b44, 0x00000000);
1383 nv_mthd(priv, 0xa097, 0x0b64, 0x00000000);
1384 nv_mthd(priv, 0xa097, 0x0b84, 0x00000000);
1385 nv_mthd(priv, 0xa097, 0x0ba4, 0x00000000);
1386 nv_mthd(priv, 0xa097, 0x0bc4, 0x00000000);
1387 nv_mthd(priv, 0xa097, 0x0be4, 0x00000000);
1388 nv_mthd(priv, 0xa097, 0x0a08, 0x00000000);
1389 nv_mthd(priv, 0xa097, 0x0a28, 0x00000000);
1390 nv_mthd(priv, 0xa097, 0x0a48, 0x00000000);
1391 nv_mthd(priv, 0xa097, 0x0a68, 0x00000000);
1392 nv_mthd(priv, 0xa097, 0x0a88, 0x00000000);
1393 nv_mthd(priv, 0xa097, 0x0aa8, 0x00000000);
1394 nv_mthd(priv, 0xa097, 0x0ac8, 0x00000000);
1395 nv_mthd(priv, 0xa097, 0x0ae8, 0x00000000);
1396 nv_mthd(priv, 0xa097, 0x0b08, 0x00000000);
1397 nv_mthd(priv, 0xa097, 0x0b28, 0x00000000);
1398 nv_mthd(priv, 0xa097, 0x0b48, 0x00000000);
1399 nv_mthd(priv, 0xa097, 0x0b68, 0x00000000);
1400 nv_mthd(priv, 0xa097, 0x0b88, 0x00000000);
1401 nv_mthd(priv, 0xa097, 0x0ba8, 0x00000000);
1402 nv_mthd(priv, 0xa097, 0x0bc8, 0x00000000);
1403 nv_mthd(priv, 0xa097, 0x0be8, 0x00000000);
1404 nv_mthd(priv, 0xa097, 0x0a0c, 0x00000000);
1405 nv_mthd(priv, 0xa097, 0x0a2c, 0x00000000);
1406 nv_mthd(priv, 0xa097, 0x0a4c, 0x00000000);
1407 nv_mthd(priv, 0xa097, 0x0a6c, 0x00000000);
1408 nv_mthd(priv, 0xa097, 0x0a8c, 0x00000000);
1409 nv_mthd(priv, 0xa097, 0x0aac, 0x00000000);
1410 nv_mthd(priv, 0xa097, 0x0acc, 0x00000000);
1411 nv_mthd(priv, 0xa097, 0x0aec, 0x00000000);
1412 nv_mthd(priv, 0xa097, 0x0b0c, 0x00000000);
1413 nv_mthd(priv, 0xa097, 0x0b2c, 0x00000000);
1414 nv_mthd(priv, 0xa097, 0x0b4c, 0x00000000);
1415 nv_mthd(priv, 0xa097, 0x0b6c, 0x00000000);
1416 nv_mthd(priv, 0xa097, 0x0b8c, 0x00000000);
1417 nv_mthd(priv, 0xa097, 0x0bac, 0x00000000);
1418 nv_mthd(priv, 0xa097, 0x0bcc, 0x00000000);
1419 nv_mthd(priv, 0xa097, 0x0bec, 0x00000000);
1420 nv_mthd(priv, 0xa097, 0x0a10, 0x00000000);
1421 nv_mthd(priv, 0xa097, 0x0a30, 0x00000000);
1422 nv_mthd(priv, 0xa097, 0x0a50, 0x00000000);
1423 nv_mthd(priv, 0xa097, 0x0a70, 0x00000000);
1424 nv_mthd(priv, 0xa097, 0x0a90, 0x00000000);
1425 nv_mthd(priv, 0xa097, 0x0ab0, 0x00000000);
1426 nv_mthd(priv, 0xa097, 0x0ad0, 0x00000000);
1427 nv_mthd(priv, 0xa097, 0x0af0, 0x00000000);
1428 nv_mthd(priv, 0xa097, 0x0b10, 0x00000000);
1429 nv_mthd(priv, 0xa097, 0x0b30, 0x00000000);
1430 nv_mthd(priv, 0xa097, 0x0b50, 0x00000000);
1431 nv_mthd(priv, 0xa097, 0x0b70, 0x00000000);
1432 nv_mthd(priv, 0xa097, 0x0b90, 0x00000000);
1433 nv_mthd(priv, 0xa097, 0x0bb0, 0x00000000);
1434 nv_mthd(priv, 0xa097, 0x0bd0, 0x00000000);
1435 nv_mthd(priv, 0xa097, 0x0bf0, 0x00000000);
1436 nv_mthd(priv, 0xa097, 0x0a14, 0x00000000);
1437 nv_mthd(priv, 0xa097, 0x0a34, 0x00000000);
1438 nv_mthd(priv, 0xa097, 0x0a54, 0x00000000);
1439 nv_mthd(priv, 0xa097, 0x0a74, 0x00000000);
1440 nv_mthd(priv, 0xa097, 0x0a94, 0x00000000);
1441 nv_mthd(priv, 0xa097, 0x0ab4, 0x00000000);
1442 nv_mthd(priv, 0xa097, 0x0ad4, 0x00000000);
1443 nv_mthd(priv, 0xa097, 0x0af4, 0x00000000);
1444 nv_mthd(priv, 0xa097, 0x0b14, 0x00000000);
1445 nv_mthd(priv, 0xa097, 0x0b34, 0x00000000);
1446 nv_mthd(priv, 0xa097, 0x0b54, 0x00000000);
1447 nv_mthd(priv, 0xa097, 0x0b74, 0x00000000);
1448 nv_mthd(priv, 0xa097, 0x0b94, 0x00000000);
1449 nv_mthd(priv, 0xa097, 0x0bb4, 0x00000000);
1450 nv_mthd(priv, 0xa097, 0x0bd4, 0x00000000);
1451 nv_mthd(priv, 0xa097, 0x0bf4, 0x00000000);
1452 nv_mthd(priv, 0xa097, 0x0c00, 0x00000000);
1453 nv_mthd(priv, 0xa097, 0x0c10, 0x00000000);
1454 nv_mthd(priv, 0xa097, 0x0c20, 0x00000000);
1455 nv_mthd(priv, 0xa097, 0x0c30, 0x00000000);
1456 nv_mthd(priv, 0xa097, 0x0c40, 0x00000000);
1457 nv_mthd(priv, 0xa097, 0x0c50, 0x00000000);
1458 nv_mthd(priv, 0xa097, 0x0c60, 0x00000000);
1459 nv_mthd(priv, 0xa097, 0x0c70, 0x00000000);
1460 nv_mthd(priv, 0xa097, 0x0c80, 0x00000000);
1461 nv_mthd(priv, 0xa097, 0x0c90, 0x00000000);
1462 nv_mthd(priv, 0xa097, 0x0ca0, 0x00000000);
1463 nv_mthd(priv, 0xa097, 0x0cb0, 0x00000000);
1464 nv_mthd(priv, 0xa097, 0x0cc0, 0x00000000);
1465 nv_mthd(priv, 0xa097, 0x0cd0, 0x00000000);
1466 nv_mthd(priv, 0xa097, 0x0ce0, 0x00000000);
1467 nv_mthd(priv, 0xa097, 0x0cf0, 0x00000000);
1468 nv_mthd(priv, 0xa097, 0x0c04, 0x00000000);
1469 nv_mthd(priv, 0xa097, 0x0c14, 0x00000000);
1470 nv_mthd(priv, 0xa097, 0x0c24, 0x00000000);
1471 nv_mthd(priv, 0xa097, 0x0c34, 0x00000000);
1472 nv_mthd(priv, 0xa097, 0x0c44, 0x00000000);
1473 nv_mthd(priv, 0xa097, 0x0c54, 0x00000000);
1474 nv_mthd(priv, 0xa097, 0x0c64, 0x00000000);
1475 nv_mthd(priv, 0xa097, 0x0c74, 0x00000000);
1476 nv_mthd(priv, 0xa097, 0x0c84, 0x00000000);
1477 nv_mthd(priv, 0xa097, 0x0c94, 0x00000000);
1478 nv_mthd(priv, 0xa097, 0x0ca4, 0x00000000);
1479 nv_mthd(priv, 0xa097, 0x0cb4, 0x00000000);
1480 nv_mthd(priv, 0xa097, 0x0cc4, 0x00000000);
1481 nv_mthd(priv, 0xa097, 0x0cd4, 0x00000000);
1482 nv_mthd(priv, 0xa097, 0x0ce4, 0x00000000);
1483 nv_mthd(priv, 0xa097, 0x0cf4, 0x00000000);
1484 nv_mthd(priv, 0xa097, 0x0c08, 0x00000000);
1485 nv_mthd(priv, 0xa097, 0x0c18, 0x00000000);
1486 nv_mthd(priv, 0xa097, 0x0c28, 0x00000000);
1487 nv_mthd(priv, 0xa097, 0x0c38, 0x00000000);
1488 nv_mthd(priv, 0xa097, 0x0c48, 0x00000000);
1489 nv_mthd(priv, 0xa097, 0x0c58, 0x00000000);
1490 nv_mthd(priv, 0xa097, 0x0c68, 0x00000000);
1491 nv_mthd(priv, 0xa097, 0x0c78, 0x00000000);
1492 nv_mthd(priv, 0xa097, 0x0c88, 0x00000000);
1493 nv_mthd(priv, 0xa097, 0x0c98, 0x00000000);
1494 nv_mthd(priv, 0xa097, 0x0ca8, 0x00000000);
1495 nv_mthd(priv, 0xa097, 0x0cb8, 0x00000000);
1496 nv_mthd(priv, 0xa097, 0x0cc8, 0x00000000);
1497 nv_mthd(priv, 0xa097, 0x0cd8, 0x00000000);
1498 nv_mthd(priv, 0xa097, 0x0ce8, 0x00000000);
1499 nv_mthd(priv, 0xa097, 0x0cf8, 0x00000000);
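	/* 0x3f800000 is IEEE-754 1.0f; these read as floating-point
	 * defaults rather than bitfields.
	 */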
1500 nv_mthd(priv, 0xa097, 0x0c0c, 0x3f800000);
1501 nv_mthd(priv, 0xa097, 0x0c1c, 0x3f800000);
1502 nv_mthd(priv, 0xa097, 0x0c2c, 0x3f800000);
1503 nv_mthd(priv, 0xa097, 0x0c3c, 0x3f800000);
1504 nv_mthd(priv, 0xa097, 0x0c4c, 0x3f800000);
1505 nv_mthd(priv, 0xa097, 0x0c5c, 0x3f800000);
1506 nv_mthd(priv, 0xa097, 0x0c6c, 0x3f800000);
1507 nv_mthd(priv, 0xa097, 0x0c7c, 0x3f800000);
1508 nv_mthd(priv, 0xa097, 0x0c8c, 0x3f800000);
1509 nv_mthd(priv, 0xa097, 0x0c9c, 0x3f800000);
1510 nv_mthd(priv, 0xa097, 0x0cac, 0x3f800000);
1511 nv_mthd(priv, 0xa097, 0x0cbc, 0x3f800000);
1512 nv_mthd(priv, 0xa097, 0x0ccc, 0x3f800000);
1513 nv_mthd(priv, 0xa097, 0x0cdc, 0x3f800000);
1514 nv_mthd(priv, 0xa097, 0x0cec, 0x3f800000);
1515 nv_mthd(priv, 0xa097, 0x0cfc, 0x3f800000);
1516 nv_mthd(priv, 0xa097, 0x0d00, 0xffff0000);
1517 nv_mthd(priv, 0xa097, 0x0d08, 0xffff0000);
1518 nv_mthd(priv, 0xa097, 0x0d10, 0xffff0000);
1519 nv_mthd(priv, 0xa097, 0x0d18, 0xffff0000);
1520 nv_mthd(priv, 0xa097, 0x0d20, 0xffff0000);
1521 nv_mthd(priv, 0xa097, 0x0d28, 0xffff0000);
1522 nv_mthd(priv, 0xa097, 0x0d30, 0xffff0000);
1523 nv_mthd(priv, 0xa097, 0x0d38, 0xffff0000);
1524 nv_mthd(priv, 0xa097, 0x0d04, 0xffff0000);
1525 nv_mthd(priv, 0xa097, 0x0d0c, 0xffff0000);
1526 nv_mthd(priv, 0xa097, 0x0d14, 0xffff0000);
1527 nv_mthd(priv, 0xa097, 0x0d1c, 0xffff0000);
1528 nv_mthd(priv, 0xa097, 0x0d24, 0xffff0000);
1529 nv_mthd(priv, 0xa097, 0x0d2c, 0xffff0000);
1530 nv_mthd(priv, 0xa097, 0x0d34, 0xffff0000);
1531 nv_mthd(priv, 0xa097, 0x0d3c, 0xffff0000);
1532 nv_mthd(priv, 0xa097, 0x0e00, 0x00000000);
1533 nv_mthd(priv, 0xa097, 0x0e10, 0x00000000);
1534 nv_mthd(priv, 0xa097, 0x0e20, 0x00000000);
1535 nv_mthd(priv, 0xa097, 0x0e30, 0x00000000);
1536 nv_mthd(priv, 0xa097, 0x0e40, 0x00000000);
1537 nv_mthd(priv, 0xa097, 0x0e50, 0x00000000);
1538 nv_mthd(priv, 0xa097, 0x0e60, 0x00000000);
1539 nv_mthd(priv, 0xa097, 0x0e70, 0x00000000);
1540 nv_mthd(priv, 0xa097, 0x0e80, 0x00000000);
1541 nv_mthd(priv, 0xa097, 0x0e90, 0x00000000);
1542 nv_mthd(priv, 0xa097, 0x0ea0, 0x00000000);
1543 nv_mthd(priv, 0xa097, 0x0eb0, 0x00000000);
1544 nv_mthd(priv, 0xa097, 0x0ec0, 0x00000000);
1545 nv_mthd(priv, 0xa097, 0x0ed0, 0x00000000);
1546 nv_mthd(priv, 0xa097, 0x0ee0, 0x00000000);
1547 nv_mthd(priv, 0xa097, 0x0ef0, 0x00000000);
1548 nv_mthd(priv, 0xa097, 0x0e04, 0xffff0000);
1549 nv_mthd(priv, 0xa097, 0x0e14, 0xffff0000);
1550 nv_mthd(priv, 0xa097, 0x0e24, 0xffff0000);
1551 nv_mthd(priv, 0xa097, 0x0e34, 0xffff0000);
1552 nv_mthd(priv, 0xa097, 0x0e44, 0xffff0000);
1553 nv_mthd(priv, 0xa097, 0x0e54, 0xffff0000);
1554 nv_mthd(priv, 0xa097, 0x0e64, 0xffff0000);
1555 nv_mthd(priv, 0xa097, 0x0e74, 0xffff0000);
1556 nv_mthd(priv, 0xa097, 0x0e84, 0xffff0000);
1557 nv_mthd(priv, 0xa097, 0x0e94, 0xffff0000);
1558 nv_mthd(priv, 0xa097, 0x0ea4, 0xffff0000);
1559 nv_mthd(priv, 0xa097, 0x0eb4, 0xffff0000);
1560 nv_mthd(priv, 0xa097, 0x0ec4, 0xffff0000);
1561 nv_mthd(priv, 0xa097, 0x0ed4, 0xffff0000);
1562 nv_mthd(priv, 0xa097, 0x0ee4, 0xffff0000);
1563 nv_mthd(priv, 0xa097, 0x0ef4, 0xffff0000);
1564 nv_mthd(priv, 0xa097, 0x0e08, 0xffff0000);
1565 nv_mthd(priv, 0xa097, 0x0e18, 0xffff0000);
1566 nv_mthd(priv, 0xa097, 0x0e28, 0xffff0000);
1567 nv_mthd(priv, 0xa097, 0x0e38, 0xffff0000);
1568 nv_mthd(priv, 0xa097, 0x0e48, 0xffff0000);
1569 nv_mthd(priv, 0xa097, 0x0e58, 0xffff0000);
1570 nv_mthd(priv, 0xa097, 0x0e68, 0xffff0000);
1571 nv_mthd(priv, 0xa097, 0x0e78, 0xffff0000);
1572 nv_mthd(priv, 0xa097, 0x0e88, 0xffff0000);
1573 nv_mthd(priv, 0xa097, 0x0e98, 0xffff0000);
1574 nv_mthd(priv, 0xa097, 0x0ea8, 0xffff0000);
1575 nv_mthd(priv, 0xa097, 0x0eb8, 0xffff0000);
1576 nv_mthd(priv, 0xa097, 0x0ec8, 0xffff0000);
1577 nv_mthd(priv, 0xa097, 0x0ed8, 0xffff0000);
1578 nv_mthd(priv, 0xa097, 0x0ee8, 0xffff0000);
1579 nv_mthd(priv, 0xa097, 0x0ef8, 0xffff0000);
1580 nv_mthd(priv, 0xa097, 0x0d40, 0x00000000);
1581 nv_mthd(priv, 0xa097, 0x0d48, 0x00000000);
1582 nv_mthd(priv, 0xa097, 0x0d50, 0x00000000);
1583 nv_mthd(priv, 0xa097, 0x0d58, 0x00000000);
1584 nv_mthd(priv, 0xa097, 0x0d44, 0x00000000);
1585 nv_mthd(priv, 0xa097, 0x0d4c, 0x00000000);
1586 nv_mthd(priv, 0xa097, 0x0d54, 0x00000000);
1587 nv_mthd(priv, 0xa097, 0x0d5c, 0x00000000);
1588 nv_mthd(priv, 0xa097, 0x1e00, 0x00000001);
1589 nv_mthd(priv, 0xa097, 0x1e20, 0x00000001);
1590 nv_mthd(priv, 0xa097, 0x1e40, 0x00000001);
1591 nv_mthd(priv, 0xa097, 0x1e60, 0x00000001);
1592 nv_mthd(priv, 0xa097, 0x1e80, 0x00000001);
1593 nv_mthd(priv, 0xa097, 0x1ea0, 0x00000001);
1594 nv_mthd(priv, 0xa097, 0x1ec0, 0x00000001);
1595 nv_mthd(priv, 0xa097, 0x1ee0, 0x00000001);
1596 nv_mthd(priv, 0xa097, 0x1e04, 0x00000001);
1597 nv_mthd(priv, 0xa097, 0x1e24, 0x00000001);
1598 nv_mthd(priv, 0xa097, 0x1e44, 0x00000001);
1599 nv_mthd(priv, 0xa097, 0x1e64, 0x00000001);
1600 nv_mthd(priv, 0xa097, 0x1e84, 0x00000001);
1601 nv_mthd(priv, 0xa097, 0x1ea4, 0x00000001);
1602 nv_mthd(priv, 0xa097, 0x1ec4, 0x00000001);
1603 nv_mthd(priv, 0xa097, 0x1ee4, 0x00000001);
1604 nv_mthd(priv, 0xa097, 0x1e08, 0x00000002);
1605 nv_mthd(priv, 0xa097, 0x1e28, 0x00000002);
1606 nv_mthd(priv, 0xa097, 0x1e48, 0x00000002);
1607 nv_mthd(priv, 0xa097, 0x1e68, 0x00000002);
1608 nv_mthd(priv, 0xa097, 0x1e88, 0x00000002);
1609 nv_mthd(priv, 0xa097, 0x1ea8, 0x00000002);
1610 nv_mthd(priv, 0xa097, 0x1ec8, 0x00000002);
1611 nv_mthd(priv, 0xa097, 0x1ee8, 0x00000002);
1612 nv_mthd(priv, 0xa097, 0x1e0c, 0x00000001);
1613 nv_mthd(priv, 0xa097, 0x1e2c, 0x00000001);
1614 nv_mthd(priv, 0xa097, 0x1e4c, 0x00000001);
1615 nv_mthd(priv, 0xa097, 0x1e6c, 0x00000001);
1616 nv_mthd(priv, 0xa097, 0x1e8c, 0x00000001);
1617 nv_mthd(priv, 0xa097, 0x1eac, 0x00000001);
1618 nv_mthd(priv, 0xa097, 0x1ecc, 0x00000001);
1619 nv_mthd(priv, 0xa097, 0x1eec, 0x00000001);
1620 nv_mthd(priv, 0xa097, 0x1e10, 0x00000001);
1621 nv_mthd(priv, 0xa097, 0x1e30, 0x00000001);
1622 nv_mthd(priv, 0xa097, 0x1e50, 0x00000001);
1623 nv_mthd(priv, 0xa097, 0x1e70, 0x00000001);
1624 nv_mthd(priv, 0xa097, 0x1e90, 0x00000001);
1625 nv_mthd(priv, 0xa097, 0x1eb0, 0x00000001);
1626 nv_mthd(priv, 0xa097, 0x1ed0, 0x00000001);
1627 nv_mthd(priv, 0xa097, 0x1ef0, 0x00000001);
1628 nv_mthd(priv, 0xa097, 0x1e14, 0x00000002);
1629 nv_mthd(priv, 0xa097, 0x1e34, 0x00000002);
1630 nv_mthd(priv, 0xa097, 0x1e54, 0x00000002);
1631 nv_mthd(priv, 0xa097, 0x1e74, 0x00000002);
1632 nv_mthd(priv, 0xa097, 0x1e94, 0x00000002);
1633 nv_mthd(priv, 0xa097, 0x1eb4, 0x00000002);
1634 nv_mthd(priv, 0xa097, 0x1ed4, 0x00000002);
1635 nv_mthd(priv, 0xa097, 0x1ef4, 0x00000002);
1636 nv_mthd(priv, 0xa097, 0x1e18, 0x00000001);
1637 nv_mthd(priv, 0xa097, 0x1e38, 0x00000001);
1638 nv_mthd(priv, 0xa097, 0x1e58, 0x00000001);
1639 nv_mthd(priv, 0xa097, 0x1e78, 0x00000001);
1640 nv_mthd(priv, 0xa097, 0x1e98, 0x00000001);
1641 nv_mthd(priv, 0xa097, 0x1eb8, 0x00000001);
1642 nv_mthd(priv, 0xa097, 0x1ed8, 0x00000001);
1643 nv_mthd(priv, 0xa097, 0x1ef8, 0x00000001);
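	/* 0x3400-0x35fc: another 128-word block cleared to zero. */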
1644 nv_mthd(priv, 0xa097, 0x3400, 0x00000000);
1645 nv_mthd(priv, 0xa097, 0x3404, 0x00000000);
1646 nv_mthd(priv, 0xa097, 0x3408, 0x00000000);
1647 nv_mthd(priv, 0xa097, 0x340c, 0x00000000);
1648 nv_mthd(priv, 0xa097, 0x3410, 0x00000000);
1649 nv_mthd(priv, 0xa097, 0x3414, 0x00000000);
1650 nv_mthd(priv, 0xa097, 0x3418, 0x00000000);
1651 nv_mthd(priv, 0xa097, 0x341c, 0x00000000);
1652 nv_mthd(priv, 0xa097, 0x3420, 0x00000000);
1653 nv_mthd(priv, 0xa097, 0x3424, 0x00000000);
1654 nv_mthd(priv, 0xa097, 0x3428, 0x00000000);
1655 nv_mthd(priv, 0xa097, 0x342c, 0x00000000);
1656 nv_mthd(priv, 0xa097, 0x3430, 0x00000000);
1657 nv_mthd(priv, 0xa097, 0x3434, 0x00000000);
1658 nv_mthd(priv, 0xa097, 0x3438, 0x00000000);
1659 nv_mthd(priv, 0xa097, 0x343c, 0x00000000);
1660 nv_mthd(priv, 0xa097, 0x3440, 0x00000000);
1661 nv_mthd(priv, 0xa097, 0x3444, 0x00000000);
1662 nv_mthd(priv, 0xa097, 0x3448, 0x00000000);
1663 nv_mthd(priv, 0xa097, 0x344c, 0x00000000);
1664 nv_mthd(priv, 0xa097, 0x3450, 0x00000000);
1665 nv_mthd(priv, 0xa097, 0x3454, 0x00000000);
1666 nv_mthd(priv, 0xa097, 0x3458, 0x00000000);
1667 nv_mthd(priv, 0xa097, 0x345c, 0x00000000);
1668 nv_mthd(priv, 0xa097, 0x3460, 0x00000000);
1669 nv_mthd(priv, 0xa097, 0x3464, 0x00000000);
1670 nv_mthd(priv, 0xa097, 0x3468, 0x00000000);
1671 nv_mthd(priv, 0xa097, 0x346c, 0x00000000);
1672 nv_mthd(priv, 0xa097, 0x3470, 0x00000000);
1673 nv_mthd(priv, 0xa097, 0x3474, 0x00000000);
1674 nv_mthd(priv, 0xa097, 0x3478, 0x00000000);
1675 nv_mthd(priv, 0xa097, 0x347c, 0x00000000);
1676 nv_mthd(priv, 0xa097, 0x3480, 0x00000000);
1677 nv_mthd(priv, 0xa097, 0x3484, 0x00000000);
1678 nv_mthd(priv, 0xa097, 0x3488, 0x00000000);
1679 nv_mthd(priv, 0xa097, 0x348c, 0x00000000);
1680 nv_mthd(priv, 0xa097, 0x3490, 0x00000000);
1681 nv_mthd(priv, 0xa097, 0x3494, 0x00000000);
1682 nv_mthd(priv, 0xa097, 0x3498, 0x00000000);
1683 nv_mthd(priv, 0xa097, 0x349c, 0x00000000);
1684 nv_mthd(priv, 0xa097, 0x34a0, 0x00000000);
1685 nv_mthd(priv, 0xa097, 0x34a4, 0x00000000);
1686 nv_mthd(priv, 0xa097, 0x34a8, 0x00000000);
1687 nv_mthd(priv, 0xa097, 0x34ac, 0x00000000);
1688 nv_mthd(priv, 0xa097, 0x34b0, 0x00000000);
1689 nv_mthd(priv, 0xa097, 0x34b4, 0x00000000);
1690 nv_mthd(priv, 0xa097, 0x34b8, 0x00000000);
1691 nv_mthd(priv, 0xa097, 0x34bc, 0x00000000);
1692 nv_mthd(priv, 0xa097, 0x34c0, 0x00000000);
1693 nv_mthd(priv, 0xa097, 0x34c4, 0x00000000);
1694 nv_mthd(priv, 0xa097, 0x34c8, 0x00000000);
1695 nv_mthd(priv, 0xa097, 0x34cc, 0x00000000);
1696 nv_mthd(priv, 0xa097, 0x34d0, 0x00000000);
1697 nv_mthd(priv, 0xa097, 0x34d4, 0x00000000);
1698 nv_mthd(priv, 0xa097, 0x34d8, 0x00000000);
1699 nv_mthd(priv, 0xa097, 0x34dc, 0x00000000);
1700 nv_mthd(priv, 0xa097, 0x34e0, 0x00000000);
1701 nv_mthd(priv, 0xa097, 0x34e4, 0x00000000);
1702 nv_mthd(priv, 0xa097, 0x34e8, 0x00000000);
1703 nv_mthd(priv, 0xa097, 0x34ec, 0x00000000);
1704 nv_mthd(priv, 0xa097, 0x34f0, 0x00000000);
1705 nv_mthd(priv, 0xa097, 0x34f4, 0x00000000);
1706 nv_mthd(priv, 0xa097, 0x34f8, 0x00000000);
1707 nv_mthd(priv, 0xa097, 0x34fc, 0x00000000);
1708 nv_mthd(priv, 0xa097, 0x3500, 0x00000000);
1709 nv_mthd(priv, 0xa097, 0x3504, 0x00000000);
1710 nv_mthd(priv, 0xa097, 0x3508, 0x00000000);
1711 nv_mthd(priv, 0xa097, 0x350c, 0x00000000);
1712 nv_mthd(priv, 0xa097, 0x3510, 0x00000000);
1713 nv_mthd(priv, 0xa097, 0x3514, 0x00000000);
1714 nv_mthd(priv, 0xa097, 0x3518, 0x00000000);
1715 nv_mthd(priv, 0xa097, 0x351c, 0x00000000);
1716 nv_mthd(priv, 0xa097, 0x3520, 0x00000000);
1717 nv_mthd(priv, 0xa097, 0x3524, 0x00000000);
1718 nv_mthd(priv, 0xa097, 0x3528, 0x00000000);
1719 nv_mthd(priv, 0xa097, 0x352c, 0x00000000);
1720 nv_mthd(priv, 0xa097, 0x3530, 0x00000000);
1721 nv_mthd(priv, 0xa097, 0x3534, 0x00000000);
1722 nv_mthd(priv, 0xa097, 0x3538, 0x00000000);
1723 nv_mthd(priv, 0xa097, 0x353c, 0x00000000);
1724 nv_mthd(priv, 0xa097, 0x3540, 0x00000000);
1725 nv_mthd(priv, 0xa097, 0x3544, 0x00000000);
1726 nv_mthd(priv, 0xa097, 0x3548, 0x00000000);
1727 nv_mthd(priv, 0xa097, 0x354c, 0x00000000);
1728 nv_mthd(priv, 0xa097, 0x3550, 0x00000000);
1729 nv_mthd(priv, 0xa097, 0x3554, 0x00000000);
1730 nv_mthd(priv, 0xa097, 0x3558, 0x00000000);
1731 nv_mthd(priv, 0xa097, 0x355c, 0x00000000);
1732 nv_mthd(priv, 0xa097, 0x3560, 0x00000000);
1733 nv_mthd(priv, 0xa097, 0x3564, 0x00000000);
1734 nv_mthd(priv, 0xa097, 0x3568, 0x00000000);
1735 nv_mthd(priv, 0xa097, 0x356c, 0x00000000);
1736 nv_mthd(priv, 0xa097, 0x3570, 0x00000000);
1737 nv_mthd(priv, 0xa097, 0x3574, 0x00000000);
1738 nv_mthd(priv, 0xa097, 0x3578, 0x00000000);
1739 nv_mthd(priv, 0xa097, 0x357c, 0x00000000);
1740 nv_mthd(priv, 0xa097, 0x3580, 0x00000000);
1741 nv_mthd(priv, 0xa097, 0x3584, 0x00000000);
1742 nv_mthd(priv, 0xa097, 0x3588, 0x00000000);
1743 nv_mthd(priv, 0xa097, 0x358c, 0x00000000);
1744 nv_mthd(priv, 0xa097, 0x3590, 0x00000000);
1745 nv_mthd(priv, 0xa097, 0x3594, 0x00000000);
1746 nv_mthd(priv, 0xa097, 0x3598, 0x00000000);
1747 nv_mthd(priv, 0xa097, 0x359c, 0x00000000);
1748 nv_mthd(priv, 0xa097, 0x35a0, 0x00000000);
1749 nv_mthd(priv, 0xa097, 0x35a4, 0x00000000);
1750 nv_mthd(priv, 0xa097, 0x35a8, 0x00000000);
1751 nv_mthd(priv, 0xa097, 0x35ac, 0x00000000);
1752 nv_mthd(priv, 0xa097, 0x35b0, 0x00000000);
1753 nv_mthd(priv, 0xa097, 0x35b4, 0x00000000);
1754 nv_mthd(priv, 0xa097, 0x35b8, 0x00000000);
1755 nv_mthd(priv, 0xa097, 0x35bc, 0x00000000);
1756 nv_mthd(priv, 0xa097, 0x35c0, 0x00000000);
1757 nv_mthd(priv, 0xa097, 0x35c4, 0x00000000);
1758 nv_mthd(priv, 0xa097, 0x35c8, 0x00000000);
1759 nv_mthd(priv, 0xa097, 0x35cc, 0x00000000);
1760 nv_mthd(priv, 0xa097, 0x35d0, 0x00000000);
1761 nv_mthd(priv, 0xa097, 0x35d4, 0x00000000);
1762 nv_mthd(priv, 0xa097, 0x35d8, 0x00000000);
1763 nv_mthd(priv, 0xa097, 0x35dc, 0x00000000);
1764 nv_mthd(priv, 0xa097, 0x35e0, 0x00000000);
1765 nv_mthd(priv, 0xa097, 0x35e4, 0x00000000);
1766 nv_mthd(priv, 0xa097, 0x35e8, 0x00000000);
1767 nv_mthd(priv, 0xa097, 0x35ec, 0x00000000);
1768 nv_mthd(priv, 0xa097, 0x35f0, 0x00000000);
1769 nv_mthd(priv, 0xa097, 0x35f4, 0x00000000);
1770 nv_mthd(priv, 0xa097, 0x35f8, 0x00000000);
1771 nv_mthd(priv, 0xa097, 0x35fc, 0x00000000);
1772 nv_mthd(priv, 0xa097, 0x030c, 0x00000001);
1773 nv_mthd(priv, 0xa097, 0x1944, 0x00000000);
1774 nv_mthd(priv, 0xa097, 0x1514, 0x00000000);
1775 nv_mthd(priv, 0xa097, 0x0d68, 0x0000ffff);
1776 nv_mthd(priv, 0xa097, 0x121c, 0x0fac6881);
1777 nv_mthd(priv, 0xa097, 0x0fac, 0x00000001);
1778 nv_mthd(priv, 0xa097, 0x1538, 0x00000001);
1779 nv_mthd(priv, 0xa097, 0x0fe0, 0x00000000);
1780 nv_mthd(priv, 0xa097, 0x0fe4, 0x00000000);
1781 nv_mthd(priv, 0xa097, 0x0fe8, 0x00000014);
1782 nv_mthd(priv, 0xa097, 0x0fec, 0x00000040);
1783 nv_mthd(priv, 0xa097, 0x0ff0, 0x00000000);
1784 nv_mthd(priv, 0xa097, 0x179c, 0x00000000);
1785 nv_mthd(priv, 0xa097, 0x1228, 0x00000400);
1786 nv_mthd(priv, 0xa097, 0x122c, 0x00000300);
1787 nv_mthd(priv, 0xa097, 0x1230, 0x00010001);
1788 nv_mthd(priv, 0xa097, 0x07f8, 0x00000000);
1789 nv_mthd(priv, 0xa097, 0x15b4, 0x00000001);
1790 nv_mthd(priv, 0xa097, 0x15cc, 0x00000000);
1791 nv_mthd(priv, 0xa097, 0x1534, 0x00000000);
1792 nv_mthd(priv, 0xa097, 0x0fb0, 0x00000000);
1793 nv_mthd(priv, 0xa097, 0x15d0, 0x00000000);
1794 nv_mthd(priv, 0xa097, 0x153c, 0x00000000);
1795 nv_mthd(priv, 0xa097, 0x16b4, 0x00000003);
1796 nv_mthd(priv, 0xa097, 0x0fbc, 0x0000ffff);
1797 nv_mthd(priv, 0xa097, 0x0fc0, 0x0000ffff);
1798 nv_mthd(priv, 0xa097, 0x0fc4, 0x0000ffff);
1799 nv_mthd(priv, 0xa097, 0x0fc8, 0x0000ffff);
1800 nv_mthd(priv, 0xa097, 0x0df8, 0x00000000);
1801 nv_mthd(priv, 0xa097, 0x0dfc, 0x00000000);
1802 nv_mthd(priv, 0xa097, 0x1948, 0x00000000);
1803 nv_mthd(priv, 0xa097, 0x1970, 0x00000001);
1804 nv_mthd(priv, 0xa097, 0x161c, 0x000009f0);
1805 nv_mthd(priv, 0xa097, 0x0dcc, 0x00000010);
1806 nv_mthd(priv, 0xa097, 0x163c, 0x00000000);
1807 nv_mthd(priv, 0xa097, 0x15e4, 0x00000000);
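	/* 0x1160-0x11dc: 32 identical entries; purpose unknown, the
	 * captured value is simply replayed.
	 */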
1808 nv_mthd(priv, 0xa097, 0x1160, 0x25e00040);
1809 nv_mthd(priv, 0xa097, 0x1164, 0x25e00040);
1810 nv_mthd(priv, 0xa097, 0x1168, 0x25e00040);
1811 nv_mthd(priv, 0xa097, 0x116c, 0x25e00040);
1812 nv_mthd(priv, 0xa097, 0x1170, 0x25e00040);
1813 nv_mthd(priv, 0xa097, 0x1174, 0x25e00040);
1814 nv_mthd(priv, 0xa097, 0x1178, 0x25e00040);
1815 nv_mthd(priv, 0xa097, 0x117c, 0x25e00040);
1816 nv_mthd(priv, 0xa097, 0x1180, 0x25e00040);
1817 nv_mthd(priv, 0xa097, 0x1184, 0x25e00040);
1818 nv_mthd(priv, 0xa097, 0x1188, 0x25e00040);
1819 nv_mthd(priv, 0xa097, 0x118c, 0x25e00040);
1820 nv_mthd(priv, 0xa097, 0x1190, 0x25e00040);
1821 nv_mthd(priv, 0xa097, 0x1194, 0x25e00040);
1822 nv_mthd(priv, 0xa097, 0x1198, 0x25e00040);
1823 nv_mthd(priv, 0xa097, 0x119c, 0x25e00040);
1824 nv_mthd(priv, 0xa097, 0x11a0, 0x25e00040);
1825 nv_mthd(priv, 0xa097, 0x11a4, 0x25e00040);
1826 nv_mthd(priv, 0xa097, 0x11a8, 0x25e00040);
1827 nv_mthd(priv, 0xa097, 0x11ac, 0x25e00040);
1828 nv_mthd(priv, 0xa097, 0x11b0, 0x25e00040);
1829 nv_mthd(priv, 0xa097, 0x11b4, 0x25e00040);
1830 nv_mthd(priv, 0xa097, 0x11b8, 0x25e00040);
1831 nv_mthd(priv, 0xa097, 0x11bc, 0x25e00040);
1832 nv_mthd(priv, 0xa097, 0x11c0, 0x25e00040);
1833 nv_mthd(priv, 0xa097, 0x11c4, 0x25e00040);
1834 nv_mthd(priv, 0xa097, 0x11c8, 0x25e00040);
1835 nv_mthd(priv, 0xa097, 0x11cc, 0x25e00040);
1836 nv_mthd(priv, 0xa097, 0x11d0, 0x25e00040);
1837 nv_mthd(priv, 0xa097, 0x11d4, 0x25e00040);
1838 nv_mthd(priv, 0xa097, 0x11d8, 0x25e00040);
1839 nv_mthd(priv, 0xa097, 0x11dc, 0x25e00040);
1840 nv_mthd(priv, 0xa097, 0x1880, 0x00000000);
1841 nv_mthd(priv, 0xa097, 0x1884, 0x00000000);
1842 nv_mthd(priv, 0xa097, 0x1888, 0x00000000);
1843 nv_mthd(priv, 0xa097, 0x188c, 0x00000000);
1844 nv_mthd(priv, 0xa097, 0x1890, 0x00000000);
1845 nv_mthd(priv, 0xa097, 0x1894, 0x00000000);
1846 nv_mthd(priv, 0xa097, 0x1898, 0x00000000);
1847 nv_mthd(priv, 0xa097, 0x189c, 0x00000000);
1848 nv_mthd(priv, 0xa097, 0x18a0, 0x00000000);
1849 nv_mthd(priv, 0xa097, 0x18a4, 0x00000000);
1850 nv_mthd(priv, 0xa097, 0x18a8, 0x00000000);
1851 nv_mthd(priv, 0xa097, 0x18ac, 0x00000000);
1852 nv_mthd(priv, 0xa097, 0x18b0, 0x00000000);
1853 nv_mthd(priv, 0xa097, 0x18b4, 0x00000000);
1854 nv_mthd(priv, 0xa097, 0x18b8, 0x00000000);
1855 nv_mthd(priv, 0xa097, 0x18bc, 0x00000000);
1856 nv_mthd(priv, 0xa097, 0x18c0, 0x00000000);
1857 nv_mthd(priv, 0xa097, 0x18c4, 0x00000000);
1858 nv_mthd(priv, 0xa097, 0x18c8, 0x00000000);
1859 nv_mthd(priv, 0xa097, 0x18cc, 0x00000000);
1860 nv_mthd(priv, 0xa097, 0x18d0, 0x00000000);
1861 nv_mthd(priv, 0xa097, 0x18d4, 0x00000000);
1862 nv_mthd(priv, 0xa097, 0x18d8, 0x00000000);
1863 nv_mthd(priv, 0xa097, 0x18dc, 0x00000000);
1864 nv_mthd(priv, 0xa097, 0x18e0, 0x00000000);
1865 nv_mthd(priv, 0xa097, 0x18e4, 0x00000000);
1866 nv_mthd(priv, 0xa097, 0x18e8, 0x00000000);
1867 nv_mthd(priv, 0xa097, 0x18ec, 0x00000000);
1868 nv_mthd(priv, 0xa097, 0x18f0, 0x00000000);
1869 nv_mthd(priv, 0xa097, 0x18f4, 0x00000000);
1870 nv_mthd(priv, 0xa097, 0x18f8, 0x00000000);
1871 nv_mthd(priv, 0xa097, 0x18fc, 0x00000000);
1872 nv_mthd(priv, 0xa097, 0x0f84, 0x00000000);
1873 nv_mthd(priv, 0xa097, 0x0f88, 0x00000000);
1874 nv_mthd(priv, 0xa097, 0x17c8, 0x00000000);
1875 nv_mthd(priv, 0xa097, 0x17cc, 0x00000000);
1876 nv_mthd(priv, 0xa097, 0x17d0, 0x000000ff);
1877 nv_mthd(priv, 0xa097, 0x17d4, 0xffffffff);
1878 nv_mthd(priv, 0xa097, 0x17d8, 0x00000002);
1879 nv_mthd(priv, 0xa097, 0x17dc, 0x00000000);
1880 nv_mthd(priv, 0xa097, 0x15f4, 0x00000000);
1881 nv_mthd(priv, 0xa097, 0x15f8, 0x00000000);
1882 nv_mthd(priv, 0xa097, 0x1434, 0x00000000);
1883 nv_mthd(priv, 0xa097, 0x1438, 0x00000000);
1884 nv_mthd(priv, 0xa097, 0x0d74, 0x00000000);
1885 nv_mthd(priv, 0xa097, 0x0dec, 0x00000001);
1886 nv_mthd(priv, 0xa097, 0x13a4, 0x00000000);
1887 nv_mthd(priv, 0xa097, 0x1318, 0x00000001);
1888 nv_mthd(priv, 0xa097, 0x1644, 0x00000000);
1889 nv_mthd(priv, 0xa097, 0x0748, 0x00000000);
1890 nv_mthd(priv, 0xa097, 0x0de8, 0x00000000);
1891 nv_mthd(priv, 0xa097, 0x1648, 0x00000000);
1892 nv_mthd(priv, 0xa097, 0x12a4, 0x00000000);
1893 nv_mthd(priv, 0xa097, 0x1120, 0x00000000);
1894 nv_mthd(priv, 0xa097, 0x1124, 0x00000000);
1895 nv_mthd(priv, 0xa097, 0x1128, 0x00000000);
1896 nv_mthd(priv, 0xa097, 0x112c, 0x00000000);
1897 nv_mthd(priv, 0xa097, 0x1118, 0x00000000);
1898 nv_mthd(priv, 0xa097, 0x164c, 0x00000000);
1899 nv_mthd(priv, 0xa097, 0x1658, 0x00000000);
1900 nv_mthd(priv, 0xa097, 0x1910, 0x00000290);
1901 nv_mthd(priv, 0xa097, 0x1518, 0x00000000);
1902 nv_mthd(priv, 0xa097, 0x165c, 0x00000001);
1903 nv_mthd(priv, 0xa097, 0x1520, 0x00000000);
1904 nv_mthd(priv, 0xa097, 0x1604, 0x00000000);
1905 nv_mthd(priv, 0xa097, 0x1570, 0x00000000);
1906 nv_mthd(priv, 0xa097, 0x13b0, 0x3f800000);
1907 nv_mthd(priv, 0xa097, 0x13b4, 0x3f800000);
1908 nv_mthd(priv, 0xa097, 0x020c, 0x00000000);
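	/* 0x1670-0x167c: packed byte ramp (0x00, 0x10, ... 0xf8),
	 * presumably a default lookup table of some sort.
	 */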
1909 nv_mthd(priv, 0xa097, 0x1670, 0x30201000);
1910 nv_mthd(priv, 0xa097, 0x1674, 0x70605040);
1911 nv_mthd(priv, 0xa097, 0x1678, 0xb8a89888);
1912 nv_mthd(priv, 0xa097, 0x167c, 0xf8e8d8c8);
1913 nv_mthd(priv, 0xa097, 0x166c, 0x00000000);
1914 nv_mthd(priv, 0xa097, 0x1680, 0x00ffff00);
1915 nv_mthd(priv, 0xa097, 0x12d0, 0x00000003);
1916 nv_mthd(priv, 0xa097, 0x12d4, 0x00000002);
1917 nv_mthd(priv, 0xa097, 0x1684, 0x00000000);
1918 nv_mthd(priv, 0xa097, 0x1688, 0x00000000);
1919 nv_mthd(priv, 0xa097, 0x0dac, 0x00001b02);
1920 nv_mthd(priv, 0xa097, 0x0db0, 0x00001b02);
1921 nv_mthd(priv, 0xa097, 0x0db4, 0x00000000);
1922 nv_mthd(priv, 0xa097, 0x168c, 0x00000000);
1923 nv_mthd(priv, 0xa097, 0x15bc, 0x00000000);
1924 nv_mthd(priv, 0xa097, 0x156c, 0x00000000);
1925 nv_mthd(priv, 0xa097, 0x187c, 0x00000000);
1926 nv_mthd(priv, 0xa097, 0x1110, 0x00000001);
1927 nv_mthd(priv, 0xa097, 0x0dc0, 0x00000000);
1928 nv_mthd(priv, 0xa097, 0x0dc4, 0x00000000);
1929 nv_mthd(priv, 0xa097, 0x0dc8, 0x00000000);
1930 nv_mthd(priv, 0xa097, 0x1234, 0x00000000);
1931 nv_mthd(priv, 0xa097, 0x1690, 0x00000000);
1932 nv_mthd(priv, 0xa097, 0x12ac, 0x00000001);
1933 nv_mthd(priv, 0xa097, 0x0790, 0x00000000);
1934 nv_mthd(priv, 0xa097, 0x0794, 0x00000000);
1935 nv_mthd(priv, 0xa097, 0x0798, 0x00000000);
1936 nv_mthd(priv, 0xa097, 0x079c, 0x00000000);
1937 nv_mthd(priv, 0xa097, 0x07a0, 0x00000000);
1938 nv_mthd(priv, 0xa097, 0x077c, 0x00000000);
1939 nv_mthd(priv, 0xa097, 0x1000, 0x00000010);
1940 nv_mthd(priv, 0xa097, 0x10fc, 0x00000000);
1941 nv_mthd(priv, 0xa097, 0x1290, 0x00000000);
1942 nv_mthd(priv, 0xa097, 0x0218, 0x00000010);
1943 nv_mthd(priv, 0xa097, 0x12d8, 0x00000000);
1944 nv_mthd(priv, 0xa097, 0x12dc, 0x00000010);
1945 nv_mthd(priv, 0xa097, 0x0d94, 0x00000001);
1946 nv_mthd(priv, 0xa097, 0x155c, 0x00000000);
1947 nv_mthd(priv, 0xa097, 0x1560, 0x00000000);
1948 nv_mthd(priv, 0xa097, 0x1564, 0x00000fff);
1949 nv_mthd(priv, 0xa097, 0x1574, 0x00000000);
1950 nv_mthd(priv, 0xa097, 0x1578, 0x00000000);
1951 nv_mthd(priv, 0xa097, 0x157c, 0x000fffff);
1952 nv_mthd(priv, 0xa097, 0x1354, 0x00000000);
1953 nv_mthd(priv, 0xa097, 0x1610, 0x00000012);
1954 nv_mthd(priv, 0xa097, 0x1608, 0x00000000);
1955 nv_mthd(priv, 0xa097, 0x160c, 0x00000000);
1956 nv_mthd(priv, 0xa097, 0x260c, 0x00000000);
1957 nv_mthd(priv, 0xa097, 0x07ac, 0x00000000);
1958 nv_mthd(priv, 0xa097, 0x162c, 0x00000003);
1959 nv_mthd(priv, 0xa097, 0x0210, 0x00000000);
1960 nv_mthd(priv, 0xa097, 0x0320, 0x00000000);
1961 nv_mthd(priv, 0xa097, 0x0324, 0x3f800000);
1962 nv_mthd(priv, 0xa097, 0x0328, 0x3f800000);
1963 nv_mthd(priv, 0xa097, 0x032c, 0x3f800000);
1964 nv_mthd(priv, 0xa097, 0x0330, 0x3f800000);
1965 nv_mthd(priv, 0xa097, 0x0334, 0x3f800000);
1966 nv_mthd(priv, 0xa097, 0x0338, 0x3f800000);
1967 nv_mthd(priv, 0xa097, 0x0750, 0x00000000);
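	/* 0x0760-0x078c (and 0x07d0-0x07d8 below): more packed byte
	 * ramps, again presumably default lookup tables.
	 */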
1968 nv_mthd(priv, 0xa097, 0x0760, 0x39291909);
1969 nv_mthd(priv, 0xa097, 0x0764, 0x79695949);
1970 nv_mthd(priv, 0xa097, 0x0768, 0xb9a99989);
1971 nv_mthd(priv, 0xa097, 0x076c, 0xf9e9d9c9);
1972 nv_mthd(priv, 0xa097, 0x0770, 0x30201000);
1973 nv_mthd(priv, 0xa097, 0x0774, 0x70605040);
1974 nv_mthd(priv, 0xa097, 0x0778, 0x00009080);
1975 nv_mthd(priv, 0xa097, 0x0780, 0x39291909);
1976 nv_mthd(priv, 0xa097, 0x0784, 0x79695949);
1977 nv_mthd(priv, 0xa097, 0x0788, 0xb9a99989);
1978 nv_mthd(priv, 0xa097, 0x078c, 0xf9e9d9c9);
1979 nv_mthd(priv, 0xa097, 0x07d0, 0x30201000);
1980 nv_mthd(priv, 0xa097, 0x07d4, 0x70605040);
1981 nv_mthd(priv, 0xa097, 0x07d8, 0x00009080);
1982 nv_mthd(priv, 0xa097, 0x037c, 0x00000001);
1983 nv_mthd(priv, 0xa097, 0x0740, 0x00000000);
1984 nv_mthd(priv, 0xa097, 0x0744, 0x00000000);
1985 nv_mthd(priv, 0xa097, 0x2600, 0x00000000);
1986 nv_mthd(priv, 0xa097, 0x1918, 0x00000000);
1987 nv_mthd(priv, 0xa097, 0x191c, 0x00000900);
1988 nv_mthd(priv, 0xa097, 0x1920, 0x00000405);
1989 nv_mthd(priv, 0xa097, 0x1308, 0x00000001);
1990 nv_mthd(priv, 0xa097, 0x1924, 0x00000000);
1991 nv_mthd(priv, 0xa097, 0x13ac, 0x00000000);
1992 nv_mthd(priv, 0xa097, 0x192c, 0x00000001);
1993 nv_mthd(priv, 0xa097, 0x193c, 0x00002c1c);
1994 nv_mthd(priv, 0xa097, 0x0d7c, 0x00000000);
1995 nv_mthd(priv, 0xa097, 0x0f8c, 0x00000000);
1996 nv_mthd(priv, 0xa097, 0x02c0, 0x00000001);
1997 nv_mthd(priv, 0xa097, 0x1510, 0x00000000);
1998 nv_mthd(priv, 0xa097, 0x1940, 0x00000000);
1999 nv_mthd(priv, 0xa097, 0x0ff4, 0x00000000);
2000 nv_mthd(priv, 0xa097, 0x0ff8, 0x00000000);
2001 nv_mthd(priv, 0xa097, 0x194c, 0x00000000);
2002 nv_mthd(priv, 0xa097, 0x1950, 0x00000000);
2003 nv_mthd(priv, 0xa097, 0x1968, 0x00000000);
2004 nv_mthd(priv, 0xa097, 0x1590, 0x0000003f);
2005 nv_mthd(priv, 0xa097, 0x07e8, 0x00000000);
2006 nv_mthd(priv, 0xa097, 0x07ec, 0x00000000);
2007 nv_mthd(priv, 0xa097, 0x07f0, 0x00000000);
2008 nv_mthd(priv, 0xa097, 0x07f4, 0x00000000);
2009 nv_mthd(priv, 0xa097, 0x196c, 0x00000011);
2010 nv_mthd(priv, 0xa097, 0x02e4, 0x0000b001);
2011 nv_mthd(priv, 0xa097, 0x036c, 0x00000000);
2012 nv_mthd(priv, 0xa097, 0x0370, 0x00000000);
2013 nv_mthd(priv, 0xa097, 0x197c, 0x00000000);
2014 nv_mthd(priv, 0xa097, 0x0fcc, 0x00000000);
2015 nv_mthd(priv, 0xa097, 0x0fd0, 0x00000000);
2016 nv_mthd(priv, 0xa097, 0x02d8, 0x00000040);
2017 nv_mthd(priv, 0xa097, 0x1980, 0x00000080);
2018 nv_mthd(priv, 0xa097, 0x1504, 0x00000080);
2019 nv_mthd(priv, 0xa097, 0x1984, 0x00000000);
2020 nv_mthd(priv, 0xa097, 0x0300, 0x00000001);
2021 nv_mthd(priv, 0xa097, 0x13a8, 0x00000000);
2022 nv_mthd(priv, 0xa097, 0x12ec, 0x00000000);
2023 nv_mthd(priv, 0xa097, 0x1310, 0x00000000);
2024 nv_mthd(priv, 0xa097, 0x1314, 0x00000001);
2025 nv_mthd(priv, 0xa097, 0x1380, 0x00000000);
2026 nv_mthd(priv, 0xa097, 0x1384, 0x00000001);
2027 nv_mthd(priv, 0xa097, 0x1388, 0x00000001);
2028 nv_mthd(priv, 0xa097, 0x138c, 0x00000001);
2029 nv_mthd(priv, 0xa097, 0x1390, 0x00000001);
2030 nv_mthd(priv, 0xa097, 0x1394, 0x00000000);
2031 nv_mthd(priv, 0xa097, 0x139c, 0x00000000);
2032 nv_mthd(priv, 0xa097, 0x1398, 0x00000000);
2033 nv_mthd(priv, 0xa097, 0x1594, 0x00000000);
2034 nv_mthd(priv, 0xa097, 0x1598, 0x00000001);
2035 nv_mthd(priv, 0xa097, 0x159c, 0x00000001);
2036 nv_mthd(priv, 0xa097, 0x15a0, 0x00000001);
2037 nv_mthd(priv, 0xa097, 0x15a4, 0x00000001);
2038 nv_mthd(priv, 0xa097, 0x0f54, 0x00000000);
2039 nv_mthd(priv, 0xa097, 0x0f58, 0x00000000);
2040 nv_mthd(priv, 0xa097, 0x0f5c, 0x00000000);
2041 nv_mthd(priv, 0xa097, 0x19bc, 0x00000000);
2042 nv_mthd(priv, 0xa097, 0x0f9c, 0x00000000);
2043 nv_mthd(priv, 0xa097, 0x0fa0, 0x00000000);
2044 nv_mthd(priv, 0xa097, 0x12cc, 0x00000000);
2045 nv_mthd(priv, 0xa097, 0x12e8, 0x00000000);
2046 nv_mthd(priv, 0xa097, 0x130c, 0x00000001);
2047 nv_mthd(priv, 0xa097, 0x1360, 0x00000000);
2048 nv_mthd(priv, 0xa097, 0x1364, 0x00000000);
2049 nv_mthd(priv, 0xa097, 0x1368, 0x00000000);
2050 nv_mthd(priv, 0xa097, 0x136c, 0x00000000);
2051 nv_mthd(priv, 0xa097, 0x1370, 0x00000000);
2052 nv_mthd(priv, 0xa097, 0x1374, 0x00000000);
2053 nv_mthd(priv, 0xa097, 0x1378, 0x00000000);
2054 nv_mthd(priv, 0xa097, 0x137c, 0x00000000);
2055 nv_mthd(priv, 0xa097, 0x133c, 0x00000001);
2056 nv_mthd(priv, 0xa097, 0x1340, 0x00000001);
2057 nv_mthd(priv, 0xa097, 0x1344, 0x00000002);
2058 nv_mthd(priv, 0xa097, 0x1348, 0x00000001);
2059 nv_mthd(priv, 0xa097, 0x134c, 0x00000001);
2060 nv_mthd(priv, 0xa097, 0x1350, 0x00000002);
2061 nv_mthd(priv, 0xa097, 0x1358, 0x00000001);
2062 nv_mthd(priv, 0xa097, 0x12e4, 0x00000000);
2063 nv_mthd(priv, 0xa097, 0x131c, 0x00000000);
2064 nv_mthd(priv, 0xa097, 0x1320, 0x00000000);
2065 nv_mthd(priv, 0xa097, 0x1324, 0x00000000);
2066 nv_mthd(priv, 0xa097, 0x1328, 0x00000000);
2067 nv_mthd(priv, 0xa097, 0x19c0, 0x00000000);
2068 nv_mthd(priv, 0xa097, 0x1140, 0x00000000);
2069 nv_mthd(priv, 0xa097, 0x19c4, 0x00000000);
2070 nv_mthd(priv, 0xa097, 0x19c8, 0x00001500);
2071 nv_mthd(priv, 0xa097, 0x135c, 0x00000000);
2072 nv_mthd(priv, 0xa097, 0x0f90, 0x00000000);
2073 nv_mthd(priv, 0xa097, 0x19e0, 0x00000001);
2074 nv_mthd(priv, 0xa097, 0x19e4, 0x00000001);
2075 nv_mthd(priv, 0xa097, 0x19e8, 0x00000001);
2076 nv_mthd(priv, 0xa097, 0x19ec, 0x00000001);
2077 nv_mthd(priv, 0xa097, 0x19f0, 0x00000001);
2078 nv_mthd(priv, 0xa097, 0x19f4, 0x00000001);
2079 nv_mthd(priv, 0xa097, 0x19f8, 0x00000001);
2080 nv_mthd(priv, 0xa097, 0x19fc, 0x00000001);
2081 nv_mthd(priv, 0xa097, 0x19cc, 0x00000001);
2082 nv_mthd(priv, 0xa097, 0x15b8, 0x00000000);
2083 nv_mthd(priv, 0xa097, 0x1a00, 0x00001111);
2084 nv_mthd(priv, 0xa097, 0x1a04, 0x00000000);
2085 nv_mthd(priv, 0xa097, 0x1a08, 0x00000000);
2086 nv_mthd(priv, 0xa097, 0x1a0c, 0x00000000);
2087 nv_mthd(priv, 0xa097, 0x1a10, 0x00000000);
2088 nv_mthd(priv, 0xa097, 0x1a14, 0x00000000);
2089 nv_mthd(priv, 0xa097, 0x1a18, 0x00000000);
2090 nv_mthd(priv, 0xa097, 0x1a1c, 0x00000000);
2091 nv_mthd(priv, 0xa097, 0x0d6c, 0xffff0000);
2092 nv_mthd(priv, 0xa097, 0x0d70, 0xffff0000);
2093 nv_mthd(priv, 0xa097, 0x10f8, 0x00001010);
2094 nv_mthd(priv, 0xa097, 0x0d80, 0x00000000);
2095 nv_mthd(priv, 0xa097, 0x0d84, 0x00000000);
2096 nv_mthd(priv, 0xa097, 0x0d88, 0x00000000);
2097 nv_mthd(priv, 0xa097, 0x0d8c, 0x00000000);
2098 nv_mthd(priv, 0xa097, 0x0d90, 0x00000000);
2099 nv_mthd(priv, 0xa097, 0x0da0, 0x00000000);
2100 nv_mthd(priv, 0xa097, 0x07a4, 0x00000000);
2101 nv_mthd(priv, 0xa097, 0x07a8, 0x00000000);
2102 nv_mthd(priv, 0xa097, 0x1508, 0x80000000);
2103 nv_mthd(priv, 0xa097, 0x150c, 0x40000000);
2104 nv_mthd(priv, 0xa097, 0x1668, 0x00000000);
2105 nv_mthd(priv, 0xa097, 0x0318, 0x00000008);
2106 nv_mthd(priv, 0xa097, 0x031c, 0x00000008);
2107 nv_mthd(priv, 0xa097, 0x0d9c, 0x00000001);
2108 nv_mthd(priv, 0xa097, 0x0374, 0x00000000);
2109 nv_mthd(priv, 0xa097, 0x0378, 0x00000020);
2110 nv_mthd(priv, 0xa097, 0x07dc, 0x00000000);
2111 nv_mthd(priv, 0xa097, 0x074c, 0x00000055);
2112 nv_mthd(priv, 0xa097, 0x1420, 0x00000003);
2113 nv_mthd(priv, 0xa097, 0x17bc, 0x00000000);
2114 nv_mthd(priv, 0xa097, 0x17c0, 0x00000000);
2115 nv_mthd(priv, 0xa097, 0x17c4, 0x00000001);
2116 nv_mthd(priv, 0xa097, 0x1008, 0x00000008);
2117 nv_mthd(priv, 0xa097, 0x100c, 0x00000040);
2118 nv_mthd(priv, 0xa097, 0x1010, 0x0000012c);
2119 nv_mthd(priv, 0xa097, 0x0d60, 0x00000040);
2120 nv_mthd(priv, 0xa097, 0x075c, 0x00000003);
2121 nv_mthd(priv, 0xa097, 0x1018, 0x00000020);
2122 nv_mthd(priv, 0xa097, 0x101c, 0x00000001);
2123 nv_mthd(priv, 0xa097, 0x1020, 0x00000020);
2124 nv_mthd(priv, 0xa097, 0x1024, 0x00000001);
2125 nv_mthd(priv, 0xa097, 0x1444, 0x00000000);
2126 nv_mthd(priv, 0xa097, 0x1448, 0x00000000);
2127 nv_mthd(priv, 0xa097, 0x144c, 0x00000000);
2128 nv_mthd(priv, 0xa097, 0x0360, 0x20164010);
2129 nv_mthd(priv, 0xa097, 0x0364, 0x00000020);
2130 nv_mthd(priv, 0xa097, 0x0368, 0x00000000);
2131 nv_mthd(priv, 0xa097, 0x0de4, 0x00000000);
2132 nv_mthd(priv, 0xa097, 0x0204, 0x00000006);
2133 nv_mthd(priv, 0xa097, 0x0208, 0x00000000);
2134 nv_mthd(priv, 0xa097, 0x02cc, 0x003fffff);
2135 nv_mthd(priv, 0xa097, 0x02d0, 0x003fffff);
2136 nv_mthd(priv, 0xa097, 0x1220, 0x00000005);
2137 nv_mthd(priv, 0xa097, 0x0fdc, 0x00000000);
2138 nv_mthd(priv, 0xa097, 0x0f98, 0x00400008);
2139 nv_mthd(priv, 0xa097, 0x1284, 0x08000080);
2140 nv_mthd(priv, 0xa097, 0x1450, 0x00400008);
2141 nv_mthd(priv, 0xa097, 0x1454, 0x08000080);
2142 nv_mthd(priv, 0xa097, 0x0214, 0x00000000);
2143}
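
/*
 * Illustrative sketch only, not part of the original driver: many runs
 * above write a single value across a fixed-stride range, so a
 * hypothetical helper like the one below could generate them.  nv_mthd()
 * is the same helper used throughout this file; the fill wrapper itself
 * is invented here purely for illustration.
 */
static inline void
nv_mthd_fill(struct nvc0_graph_priv *priv, u32 oclass, u32 base,
	     u32 stride, int count, u32 data)
{
	int i;

	/* e.g. nv_mthd_fill(priv, 0xa097, 0x2800, 4, 128, 0x00000000)
	 * would reproduce the zeroed 0x2800-0x29fc block above
	 */
	for (i = 0; i < count; i++)
		nv_mthd(priv, oclass, base + stride * i, data);
}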
2144
2145static void
2146nve0_grctx_generate_902d(struct nvc0_graph_priv *priv)
2147{
2148 nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
2149 nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
2150 nv_mthd(priv, 0x902d, 0x0208, 0x00000020);
2151 nv_mthd(priv, 0x902d, 0x020c, 0x00000001);
2152 nv_mthd(priv, 0x902d, 0x0210, 0x00000000);
2153 nv_mthd(priv, 0x902d, 0x0214, 0x00000080);
2154 nv_mthd(priv, 0x902d, 0x0218, 0x00000100);
2155 nv_mthd(priv, 0x902d, 0x021c, 0x00000100);
2156 nv_mthd(priv, 0x902d, 0x0220, 0x00000000);
2157 nv_mthd(priv, 0x902d, 0x0224, 0x00000000);
2158 nv_mthd(priv, 0x902d, 0x0230, 0x000000cf);
2159 nv_mthd(priv, 0x902d, 0x0234, 0x00000001);
2160 nv_mthd(priv, 0x902d, 0x0238, 0x00000020);
2161 nv_mthd(priv, 0x902d, 0x023c, 0x00000001);
2162 nv_mthd(priv, 0x902d, 0x0244, 0x00000080);
2163 nv_mthd(priv, 0x902d, 0x0248, 0x00000100);
2164 nv_mthd(priv, 0x902d, 0x024c, 0x00000100);
2165 nv_mthd(priv, 0x902d, 0x3410, 0x00000000);
2166}
2167
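/* Editorial sketch, not part of the patch: long nv_mthd() runs like the
 * two generators above can equally be expressed as data.  The helper and
 * table names below are hypothetical; the sample rows are copied from
 * nve0_grctx_generate_902d() above.
 */
struct nve0_mthd_init {
	u16 mthd;
	u32 data;
};

static const struct nve0_mthd_init nve0_902d_init[] = {
	{ 0x0200, 0x000000cf },
	{ 0x0204, 0x00000001 },
	{ 0x0208, 0x00000020 },
	/* ... remaining rows exactly as in nve0_grctx_generate_902d() ... */
	{ 0x3410, 0x00000000 },
};

static void
nve0_mthd_list(struct nvc0_graph_priv *priv, u32 class,
	       const struct nve0_mthd_init *init, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		nv_mthd(priv, class, init[i].mthd, init[i].data);
}

/* usage: nve0_mthd_list(priv, 0x902d, nve0_902d_init,
 *                       ARRAY_SIZE(nve0_902d_init)); */
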
2168static void
2169nve0_graph_generate_unk40xx(struct nvc0_graph_priv *priv)
2170{
2171 nv_wr32(priv, 0x404010, 0x0);
2172 nv_wr32(priv, 0x404014, 0x0);
2173 nv_wr32(priv, 0x404018, 0x0);
2174 nv_wr32(priv, 0x40401c, 0x0);
2175 nv_wr32(priv, 0x404020, 0x0);
2176 nv_wr32(priv, 0x404024, 0xe000);
2177 nv_wr32(priv, 0x404028, 0x0);
2178 nv_wr32(priv, 0x4040a8, 0x0);
2179 nv_wr32(priv, 0x4040ac, 0x0);
2180 nv_wr32(priv, 0x4040b0, 0x0);
2181 nv_wr32(priv, 0x4040b4, 0x0);
2182 nv_wr32(priv, 0x4040b8, 0x0);
2183 nv_wr32(priv, 0x4040bc, 0x0);
2184 nv_wr32(priv, 0x4040c0, 0x0);
2185 nv_wr32(priv, 0x4040c4, 0x0);
2186 nv_wr32(priv, 0x4040c8, 0xf800008f);
2187 nv_wr32(priv, 0x4040d0, 0x0);
2188 nv_wr32(priv, 0x4040d4, 0x0);
2189 nv_wr32(priv, 0x4040d8, 0x0);
2190 nv_wr32(priv, 0x4040dc, 0x0);
2191 nv_wr32(priv, 0x4040e0, 0x0);
2192 nv_wr32(priv, 0x4040e4, 0x0);
2193 nv_wr32(priv, 0x4040e8, 0x1000);
2194 nv_wr32(priv, 0x4040f8, 0x0);
2195 nv_wr32(priv, 0x404130, 0x0);
2196 nv_wr32(priv, 0x404134, 0x0);
2197 nv_wr32(priv, 0x404138, 0x20000040);
2198 nv_wr32(priv, 0x404150, 0x2e);
2199 nv_wr32(priv, 0x404154, 0x400);
2200 nv_wr32(priv, 0x404158, 0x200);
2201 nv_wr32(priv, 0x404164, 0x55);
2202 nv_wr32(priv, 0x4041a0, 0x0);
2203 nv_wr32(priv, 0x4041a4, 0x0);
2204 nv_wr32(priv, 0x4041a8, 0x0);
2205 nv_wr32(priv, 0x4041ac, 0x0);
2206 nv_wr32(priv, 0x404200, 0x0);
2207 nv_wr32(priv, 0x404204, 0x0);
2208 nv_wr32(priv, 0x404208, 0x0);
2209 nv_wr32(priv, 0x40420c, 0x0);
2210}
2211
2212static void
2213nve0_graph_generate_unk44xx(struct nvc0_graph_priv *priv)
2214{
2215 nv_wr32(priv, 0x404404, 0x0);
2216 nv_wr32(priv, 0x404408, 0x0);
2217 nv_wr32(priv, 0x40440c, 0x0);
2218 nv_wr32(priv, 0x404410, 0x0);
2219 nv_wr32(priv, 0x404414, 0x0);
2220 nv_wr32(priv, 0x404418, 0x0);
2221 nv_wr32(priv, 0x40441c, 0x0);
2222 nv_wr32(priv, 0x404420, 0x0);
2223 nv_wr32(priv, 0x404424, 0x0);
2224 nv_wr32(priv, 0x404428, 0x0);
2225 nv_wr32(priv, 0x40442c, 0x0);
2226 nv_wr32(priv, 0x404430, 0x0);
2227 nv_wr32(priv, 0x404434, 0x0);
2228 nv_wr32(priv, 0x404438, 0x0);
2229 nv_wr32(priv, 0x404460, 0x0);
2230 nv_wr32(priv, 0x404464, 0x0);
2231 nv_wr32(priv, 0x404468, 0xffffff);
2232 nv_wr32(priv, 0x40446c, 0x0);
2233 nv_wr32(priv, 0x404480, 0x1);
2234 nv_wr32(priv, 0x404498, 0x1);
2235}
2236
2237static void
2238nve0_graph_generate_unk46xx(struct nvc0_graph_priv *priv)
2239{
2240 nv_wr32(priv, 0x404604, 0x14);
2241 nv_wr32(priv, 0x404608, 0x0);
2242 nv_wr32(priv, 0x40460c, 0x3fff);
2243 nv_wr32(priv, 0x404610, 0x100);
2244 nv_wr32(priv, 0x404618, 0x0);
2245 nv_wr32(priv, 0x40461c, 0x0);
2246 nv_wr32(priv, 0x404620, 0x0);
2247 nv_wr32(priv, 0x404624, 0x0);
2248 nv_wr32(priv, 0x40462c, 0x0);
2249 nv_wr32(priv, 0x404630, 0x0);
2250 nv_wr32(priv, 0x404640, 0x0);
2251 nv_wr32(priv, 0x404654, 0x0);
2252 nv_wr32(priv, 0x404660, 0x0);
2253 nv_wr32(priv, 0x404678, 0x0);
2254 nv_wr32(priv, 0x40467c, 0x2);
2255 nv_wr32(priv, 0x404680, 0x0);
2256 nv_wr32(priv, 0x404684, 0x0);
2257 nv_wr32(priv, 0x404688, 0x0);
2258 nv_wr32(priv, 0x40468c, 0x0);
2259 nv_wr32(priv, 0x404690, 0x0);
2260 nv_wr32(priv, 0x404694, 0x0);
2261 nv_wr32(priv, 0x404698, 0x0);
2262 nv_wr32(priv, 0x40469c, 0x0);
2263 nv_wr32(priv, 0x4046a0, 0x7f0080);
2264 nv_wr32(priv, 0x4046a4, 0x0);
2265 nv_wr32(priv, 0x4046a8, 0x0);
2266 nv_wr32(priv, 0x4046ac, 0x0);
2267 nv_wr32(priv, 0x4046b0, 0x0);
2268 nv_wr32(priv, 0x4046b4, 0x0);
2269 nv_wr32(priv, 0x4046b8, 0x0);
2270 nv_wr32(priv, 0x4046bc, 0x0);
2271 nv_wr32(priv, 0x4046c0, 0x0);
2272 nv_wr32(priv, 0x4046c8, 0x0);
2273 nv_wr32(priv, 0x4046cc, 0x0);
2274 nv_wr32(priv, 0x4046d0, 0x0);
2275}
2276
2277static void
2278nve0_graph_generate_unk47xx(struct nvc0_graph_priv *priv)
2279{
2280 nv_wr32(priv, 0x404700, 0x0);
2281 nv_wr32(priv, 0x404704, 0x0);
2282 nv_wr32(priv, 0x404708, 0x0);
2283 nv_wr32(priv, 0x404718, 0x0);
2284 nv_wr32(priv, 0x40471c, 0x0);
2285 nv_wr32(priv, 0x404720, 0x0);
2286 nv_wr32(priv, 0x404724, 0x0);
2287 nv_wr32(priv, 0x404728, 0x0);
2288 nv_wr32(priv, 0x40472c, 0x0);
2289 nv_wr32(priv, 0x404730, 0x0);
2290 nv_wr32(priv, 0x404734, 0x100);
2291 nv_wr32(priv, 0x404738, 0x0);
2292 nv_wr32(priv, 0x40473c, 0x0);
2293 nv_wr32(priv, 0x404744, 0x0);
2294 nv_wr32(priv, 0x404748, 0x0);
2295 nv_wr32(priv, 0x404754, 0x0);
2296}
2297
2298static void
2299nve0_graph_generate_unk58xx(struct nvc0_graph_priv *priv)
2300{
2301 nv_wr32(priv, 0x405800, 0xf8000bf);
2302 nv_wr32(priv, 0x405830, 0x2180648);
2303 nv_wr32(priv, 0x405834, 0x8000000);
2304 nv_wr32(priv, 0x405838, 0x0);
2305 nv_wr32(priv, 0x405854, 0x0);
2306 nv_wr32(priv, 0x405870, 0x1);
2307 nv_wr32(priv, 0x405874, 0x1);
2308 nv_wr32(priv, 0x405878, 0x1);
2309 nv_wr32(priv, 0x40587c, 0x1);
2310 nv_wr32(priv, 0x405a00, 0x0);
2311 nv_wr32(priv, 0x405a04, 0x0);
2312 nv_wr32(priv, 0x405a18, 0x0);
2313 nv_wr32(priv, 0x405b00, 0x0);
2314 nv_wr32(priv, 0x405b10, 0x1000);
2315}
2316
2317static void
2318nve0_graph_generate_unk60xx(struct nvc0_graph_priv *priv)
2319{
2320 nv_wr32(priv, 0x406020, 0x4103c1);
2321 nv_wr32(priv, 0x406028, 0x1);
2322 nv_wr32(priv, 0x40602c, 0x1);
2323 nv_wr32(priv, 0x406030, 0x1);
2324 nv_wr32(priv, 0x406034, 0x1);
2325}
2326
2327static void
2328nve0_graph_generate_unk64xx(struct nvc0_graph_priv *priv)
2329{
2330 nv_wr32(priv, 0x4064a8, 0x0);
2331 nv_wr32(priv, 0x4064ac, 0x3fff);
2332 nv_wr32(priv, 0x4064b4, 0x0);
2333 nv_wr32(priv, 0x4064b8, 0x0);
2334 nv_wr32(priv, 0x4064c0, 0x801a00f0);
2335 nv_wr32(priv, 0x4064c4, 0x192ffff);
2336 nv_wr32(priv, 0x4064c8, 0x1800600);
2337 nv_wr32(priv, 0x4064cc, 0x0);
2338 nv_wr32(priv, 0x4064d0, 0x0);
2339 nv_wr32(priv, 0x4064d4, 0x0);
2340 nv_wr32(priv, 0x4064d8, 0x0);
2341 nv_wr32(priv, 0x4064dc, 0x0);
2342 nv_wr32(priv, 0x4064e0, 0x0);
2343 nv_wr32(priv, 0x4064e4, 0x0);
2344 nv_wr32(priv, 0x4064e8, 0x0);
2345 nv_wr32(priv, 0x4064ec, 0x0);
2346 nv_wr32(priv, 0x4064fc, 0x22a);
2347}
2348
2349static void
2350nve0_graph_generate_unk70xx(struct nvc0_graph_priv *priv)
2351{
2352 nv_wr32(priv, 0x407040, 0x0);
2353}
2354
2355static void
2356nve0_graph_generate_unk78xx(struct nvc0_graph_priv *priv)
2357{
2358 nv_wr32(priv, 0x407804, 0x23);
2359 nv_wr32(priv, 0x40780c, 0xa418820);
2360 nv_wr32(priv, 0x407810, 0x62080e6);
2361 nv_wr32(priv, 0x407814, 0x20398a4);
2362 nv_wr32(priv, 0x407818, 0xe629062);
2363 nv_wr32(priv, 0x40781c, 0xa418820);
2364 nv_wr32(priv, 0x407820, 0xe6);
2365 nv_wr32(priv, 0x4078bc, 0x103);
2366}
2367
2368static void
2369nve0_graph_generate_unk80xx(struct nvc0_graph_priv *priv)
2370{
2371 nv_wr32(priv, 0x408000, 0x0);
2372 nv_wr32(priv, 0x408004, 0x0);
2373 nv_wr32(priv, 0x408008, 0x30);
2374 nv_wr32(priv, 0x40800c, 0x0);
2375 nv_wr32(priv, 0x408010, 0x0);
2376 nv_wr32(priv, 0x408014, 0x69);
2377 nv_wr32(priv, 0x408018, 0xe100e100);
2378 nv_wr32(priv, 0x408064, 0x0);
2379}
2380
2381static void
2382nve0_graph_generate_unk88xx(struct nvc0_graph_priv *priv)
2383{
2384 nv_wr32(priv, 0x408800, 0x2802a3c);
2385 nv_wr32(priv, 0x408804, 0x40);
2386 nv_wr32(priv, 0x408808, 0x1043e005);
2387 nv_wr32(priv, 0x408840, 0xb);
2388 nv_wr32(priv, 0x408900, 0x3080b801);
2389 nv_wr32(priv, 0x408904, 0x62000001);
2390 nv_wr32(priv, 0x408908, 0xc8102f);
2391 nv_wr32(priv, 0x408980, 0x11d);
2392}
2393
2394static void
2395nve0_graph_generate_gpc(struct nvc0_graph_priv *priv)
2396{
2397 nv_wr32(priv, 0x418380, 0x16);
2398 nv_wr32(priv, 0x418400, 0x38004e00);
2399 nv_wr32(priv, 0x418404, 0x71e0ffff);
2400 nv_wr32(priv, 0x41840c, 0x1008);
2401 nv_wr32(priv, 0x418410, 0xfff0fff);
2402 nv_wr32(priv, 0x418414, 0x2200fff);
2403 nv_wr32(priv, 0x418450, 0x0);
2404 nv_wr32(priv, 0x418454, 0x0);
2405 nv_wr32(priv, 0x418458, 0x0);
2406 nv_wr32(priv, 0x41845c, 0x0);
2407 nv_wr32(priv, 0x418460, 0x0);
2408 nv_wr32(priv, 0x418464, 0x0);
2409 nv_wr32(priv, 0x418468, 0x1);
2410 nv_wr32(priv, 0x41846c, 0x0);
2411 nv_wr32(priv, 0x418470, 0x0);
2412 nv_wr32(priv, 0x418600, 0x1f);
2413 nv_wr32(priv, 0x418684, 0xf);
2414 nv_wr32(priv, 0x418700, 0x2);
2415 nv_wr32(priv, 0x418704, 0x80);
2416 nv_wr32(priv, 0x418708, 0x0);
2417 nv_wr32(priv, 0x41870c, 0x0);
2418 nv_wr32(priv, 0x418710, 0x0);
2419 nv_wr32(priv, 0x418800, 0x7006860a);
2420 nv_wr32(priv, 0x418808, 0x0);
2421 nv_wr32(priv, 0x41880c, 0x0);
2422 nv_wr32(priv, 0x418810, 0x0);
2423 nv_wr32(priv, 0x418828, 0x44);
2424 nv_wr32(priv, 0x418830, 0x10000001);
2425 nv_wr32(priv, 0x4188d8, 0x8);
2426 nv_wr32(priv, 0x4188e0, 0x1000000);
2427 nv_wr32(priv, 0x4188e8, 0x0);
2428 nv_wr32(priv, 0x4188ec, 0x0);
2429 nv_wr32(priv, 0x4188f0, 0x0);
2430 nv_wr32(priv, 0x4188f4, 0x0);
2431 nv_wr32(priv, 0x4188f8, 0x0);
2432 nv_wr32(priv, 0x4188fc, 0x20100018);
2433 nv_wr32(priv, 0x41891c, 0xff00ff);
2434 nv_wr32(priv, 0x418924, 0x0);
2435 nv_wr32(priv, 0x418928, 0xffff00);
2436 nv_wr32(priv, 0x41892c, 0xff00);
2437 nv_wr32(priv, 0x418a00, 0x0);
2438 nv_wr32(priv, 0x418a04, 0x0);
2439 nv_wr32(priv, 0x418a08, 0x0);
2440 nv_wr32(priv, 0x418a0c, 0x10000);
2441 nv_wr32(priv, 0x418a10, 0x0);
2442 nv_wr32(priv, 0x418a14, 0x0);
2443 nv_wr32(priv, 0x418a18, 0x0);
2444 nv_wr32(priv, 0x418a20, 0x0);
2445 nv_wr32(priv, 0x418a24, 0x0);
2446 nv_wr32(priv, 0x418a28, 0x0);
2447 nv_wr32(priv, 0x418a2c, 0x10000);
2448 nv_wr32(priv, 0x418a30, 0x0);
2449 nv_wr32(priv, 0x418a34, 0x0);
2450 nv_wr32(priv, 0x418a38, 0x0);
2451 nv_wr32(priv, 0x418a40, 0x0);
2452 nv_wr32(priv, 0x418a44, 0x0);
2453 nv_wr32(priv, 0x418a48, 0x0);
2454 nv_wr32(priv, 0x418a4c, 0x10000);
2455 nv_wr32(priv, 0x418a50, 0x0);
2456 nv_wr32(priv, 0x418a54, 0x0);
2457 nv_wr32(priv, 0x418a58, 0x0);
2458 nv_wr32(priv, 0x418a60, 0x0);
2459 nv_wr32(priv, 0x418a64, 0x0);
2460 nv_wr32(priv, 0x418a68, 0x0);
2461 nv_wr32(priv, 0x418a6c, 0x10000);
2462 nv_wr32(priv, 0x418a70, 0x0);
2463 nv_wr32(priv, 0x418a74, 0x0);
2464 nv_wr32(priv, 0x418a78, 0x0);
2465 nv_wr32(priv, 0x418a80, 0x0);
2466 nv_wr32(priv, 0x418a84, 0x0);
2467 nv_wr32(priv, 0x418a88, 0x0);
2468 nv_wr32(priv, 0x418a8c, 0x10000);
2469 nv_wr32(priv, 0x418a90, 0x0);
2470 nv_wr32(priv, 0x418a94, 0x0);
2471 nv_wr32(priv, 0x418a98, 0x0);
2472 nv_wr32(priv, 0x418aa0, 0x0);
2473 nv_wr32(priv, 0x418aa4, 0x0);
2474 nv_wr32(priv, 0x418aa8, 0x0);
2475 nv_wr32(priv, 0x418aac, 0x10000);
2476 nv_wr32(priv, 0x418ab0, 0x0);
2477 nv_wr32(priv, 0x418ab4, 0x0);
2478 nv_wr32(priv, 0x418ab8, 0x0);
2479 nv_wr32(priv, 0x418ac0, 0x0);
2480 nv_wr32(priv, 0x418ac4, 0x0);
2481 nv_wr32(priv, 0x418ac8, 0x0);
2482 nv_wr32(priv, 0x418acc, 0x10000);
2483 nv_wr32(priv, 0x418ad0, 0x0);
2484 nv_wr32(priv, 0x418ad4, 0x0);
2485 nv_wr32(priv, 0x418ad8, 0x0);
2486 nv_wr32(priv, 0x418ae0, 0x0);
2487 nv_wr32(priv, 0x418ae4, 0x0);
2488 nv_wr32(priv, 0x418ae8, 0x0);
2489 nv_wr32(priv, 0x418aec, 0x10000);
2490 nv_wr32(priv, 0x418af0, 0x0);
2491 nv_wr32(priv, 0x418af4, 0x0);
2492 nv_wr32(priv, 0x418af8, 0x0);
2493 nv_wr32(priv, 0x418b00, 0x6);
2494 nv_wr32(priv, 0x418b08, 0xa418820);
2495 nv_wr32(priv, 0x418b0c, 0x62080e6);
2496 nv_wr32(priv, 0x418b10, 0x20398a4);
2497 nv_wr32(priv, 0x418b14, 0xe629062);
2498 nv_wr32(priv, 0x418b18, 0xa418820);
2499 nv_wr32(priv, 0x418b1c, 0xe6);
2500 nv_wr32(priv, 0x418bb8, 0x103);
2501 nv_wr32(priv, 0x418c08, 0x1);
2502 nv_wr32(priv, 0x418c10, 0x0);
2503 nv_wr32(priv, 0x418c14, 0x0);
2504 nv_wr32(priv, 0x418c18, 0x0);
2505 nv_wr32(priv, 0x418c1c, 0x0);
2506 nv_wr32(priv, 0x418c20, 0x0);
2507 nv_wr32(priv, 0x418c24, 0x0);
2508 nv_wr32(priv, 0x418c28, 0x0);
2509 nv_wr32(priv, 0x418c2c, 0x0);
2510 nv_wr32(priv, 0x418c40, 0xffffffff);
2511 nv_wr32(priv, 0x418c6c, 0x1);
2512 nv_wr32(priv, 0x418c80, 0x20200004);
2513 nv_wr32(priv, 0x418c8c, 0x1);
2514 nv_wr32(priv, 0x419000, 0x780);
2515 nv_wr32(priv, 0x419004, 0x0);
2516 nv_wr32(priv, 0x419008, 0x0);
2517 nv_wr32(priv, 0x419014, 0x4);
2518}
2519
2520static void
2521nve0_graph_generate_tpc(struct nvc0_graph_priv *priv)
2522{
2523 nv_wr32(priv, 0x419848, 0x0);
2524 nv_wr32(priv, 0x419864, 0x129);
2525 nv_wr32(priv, 0x419888, 0x0);
2526 nv_wr32(priv, 0x419a00, 0xf0);
2527 nv_wr32(priv, 0x419a04, 0x1);
2528 nv_wr32(priv, 0x419a08, 0x21);
2529 nv_wr32(priv, 0x419a0c, 0x20000);
2530 nv_wr32(priv, 0x419a10, 0x0);
2531 nv_wr32(priv, 0x419a14, 0x200);
2532 nv_wr32(priv, 0x419a1c, 0xc000);
2533 nv_wr32(priv, 0x419a20, 0x800);
2534 nv_wr32(priv, 0x419a30, 0x1);
2535 nv_wr32(priv, 0x419ac4, 0x37f440);
2536 nv_wr32(priv, 0x419c00, 0xa);
2537 nv_wr32(priv, 0x419c04, 0x80000006);
2538 nv_wr32(priv, 0x419c08, 0x2);
2539 nv_wr32(priv, 0x419c20, 0x0);
2540 nv_wr32(priv, 0x419c24, 0x84210);
2541 nv_wr32(priv, 0x419c28, 0x3efbefbe);
2542 nv_wr32(priv, 0x419ce8, 0x0);
2543 nv_wr32(priv, 0x419cf4, 0x3203);
2544 nv_wr32(priv, 0x419e04, 0x0);
2545 nv_wr32(priv, 0x419e08, 0x0);
2546 nv_wr32(priv, 0x419e0c, 0x0);
2547 nv_wr32(priv, 0x419e10, 0x402);
2548 nv_wr32(priv, 0x419e44, 0x13eff2);
2549 nv_wr32(priv, 0x419e48, 0x0);
2550 nv_wr32(priv, 0x419e4c, 0x7f);
2551 nv_wr32(priv, 0x419e50, 0x0);
2552 nv_wr32(priv, 0x419e54, 0x0);
2553 nv_wr32(priv, 0x419e58, 0x0);
2554 nv_wr32(priv, 0x419e5c, 0x0);
2555 nv_wr32(priv, 0x419e60, 0x0);
2556 nv_wr32(priv, 0x419e64, 0x0);
2557 nv_wr32(priv, 0x419e68, 0x0);
2558 nv_wr32(priv, 0x419e6c, 0x0);
2559 nv_wr32(priv, 0x419e70, 0x0);
2560 nv_wr32(priv, 0x419e74, 0x0);
2561 nv_wr32(priv, 0x419e78, 0x0);
2562 nv_wr32(priv, 0x419e7c, 0x0);
2563 nv_wr32(priv, 0x419e80, 0x0);
2564 nv_wr32(priv, 0x419e84, 0x0);
2565 nv_wr32(priv, 0x419e88, 0x0);
2566 nv_wr32(priv, 0x419e8c, 0x0);
2567 nv_wr32(priv, 0x419e90, 0x0);
2568 nv_wr32(priv, 0x419e94, 0x0);
2569 nv_wr32(priv, 0x419e98, 0x0);
2570 nv_wr32(priv, 0x419eac, 0x1fcf);
2571 nv_wr32(priv, 0x419eb0, 0xd3f);
2572 nv_wr32(priv, 0x419ec8, 0x1304f);
2573 nv_wr32(priv, 0x419f30, 0x0);
2574 nv_wr32(priv, 0x419f34, 0x0);
2575 nv_wr32(priv, 0x419f38, 0x0);
2576 nv_wr32(priv, 0x419f3c, 0x0);
2577 nv_wr32(priv, 0x419f40, 0x0);
2578 nv_wr32(priv, 0x419f44, 0x0);
2579 nv_wr32(priv, 0x419f48, 0x0);
2580 nv_wr32(priv, 0x419f4c, 0x0);
2581 nv_wr32(priv, 0x419f58, 0x0);
2582 nv_wr32(priv, 0x419f78, 0xb);
2583}
2584
2585static void
2586nve0_graph_generate_tpcunk(struct nvc0_graph_priv *priv)
2587{
2588 nv_wr32(priv, 0x41be24, 0x6);
2589 nv_wr32(priv, 0x41bec0, 0x12180000);
2590 nv_wr32(priv, 0x41bec4, 0x37f7f);
2591 nv_wr32(priv, 0x41bee4, 0x6480430);
2592 nv_wr32(priv, 0x41bf00, 0xa418820);
2593 nv_wr32(priv, 0x41bf04, 0x62080e6);
2594 nv_wr32(priv, 0x41bf08, 0x20398a4);
2595 nv_wr32(priv, 0x41bf0c, 0xe629062);
2596 nv_wr32(priv, 0x41bf10, 0xa418820);
2597 nv_wr32(priv, 0x41bf14, 0xe6);
2598 nv_wr32(priv, 0x41bfd0, 0x900103);
2599 nv_wr32(priv, 0x41bfe0, 0x400001);
2600 nv_wr32(priv, 0x41bfe4, 0x0);
2601}
2602
2603int
2604nve0_grctx_generate(struct nvc0_graph_priv *priv)
2605{
2606 struct nvc0_grctx info;
2607 int ret, i, gpc, tpc, id;
2608 u32 data[6] = {}, data2[2] = {}, tmp;
2609 u32 tpc_set = 0, tpc_mask = 0;
2610 u32 magic[GPC_MAX][2], offset;
2611 u8 tpcnr[GPC_MAX], a, b;
2612 u8 shift, ntpcv;
2613
2614 ret = nvc0_grctx_init(priv, &info);
2615 if (ret)
2616 return ret;
2617
2618 nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
2619 nv_wr32(priv, 0x400204, 0x00000000);
2620 nv_wr32(priv, 0x400208, 0x00000000);
2621
2622 nve0_graph_generate_unk40xx(priv);
2623 nve0_graph_generate_unk44xx(priv);
2624 nve0_graph_generate_unk46xx(priv);
2625 nve0_graph_generate_unk47xx(priv);
2626 nve0_graph_generate_unk58xx(priv);
2627 nve0_graph_generate_unk60xx(priv);
2628 nve0_graph_generate_unk64xx(priv);
2629 nve0_graph_generate_unk70xx(priv);
2630 nve0_graph_generate_unk78xx(priv);
2631 nve0_graph_generate_unk80xx(priv);
2632 nve0_graph_generate_unk88xx(priv);
2633 nve0_graph_generate_gpc(priv);
2634 nve0_graph_generate_tpc(priv);
2635 nve0_graph_generate_tpcunk(priv);
2636
2637 nv_wr32(priv, 0x404154, 0x0);
2638
2639 mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
2640 mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
2641 mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
2642 mmio_list(0x40800c, 0x00000000, 8, 1);
2643 mmio_list(0x408010, 0x80000000, 0, 0);
2644 mmio_list(0x419004, 0x00000000, 8, 1);
2645 mmio_list(0x419008, 0x00000000, 0, 0);
2646 mmio_list(0x4064cc, 0x80000000, 0, 0);
2647 mmio_list(0x408004, 0x00000000, 8, 0);
2648 mmio_list(0x408008, 0x80000030, 0, 0);
2649 mmio_list(0x418808, 0x00000000, 8, 0);
2650 mmio_list(0x41880c, 0x80000030, 0, 0);
2651 mmio_list(0x4064c8, 0x01800600, 0, 0);
2652 mmio_list(0x418810, 0x80000000, 12, 2);
2653 mmio_list(0x419848, 0x10000000, 12, 2);
2654 mmio_list(0x405830, 0x02180648, 0, 0);
2655 mmio_list(0x4064c4, 0x0192ffff, 0, 0);
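	/* per-GPC "magic" words for GPC_UNIT 0x30c0/0x30e4 below: the high
	 * halves are sizes that scale with the unit's TPC count (0x0218 and
	 * 0x0648 bytes per TPC), the low halves carry running offsets into
	 * the large buffer from mmio_data(0x060000, ...) above; this is
	 * inferred from the arithmetic, the hardware meaning is undocumented */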
2656 for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
2657 u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
2658 u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
2659 magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
2660 magic[gpc][1] = 0x00000000 | (magic1 << 16);
2661 offset += 0x0324 * priv->tpc_nr[gpc];
2662 }
2663 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
2664 mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
2665 mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
2666 offset += 0x07ff * priv->tpc_nr[gpc];
2667 }
2668 mmio_list(0x17e91c, 0x06060609, 0, 0);
2669 mmio_list(0x17e920, 0x00090a05, 0, 0);
2670
2671 nv_wr32(priv, 0x418c6c, 0x1);
2672 nv_wr32(priv, 0x41980c, 0x10);
2673 nv_wr32(priv, 0x41be08, 0x4);
2674 nv_wr32(priv, 0x4064c0, 0x801a00f0);
2675 nv_wr32(priv, 0x405800, 0xf8000bf);
2676 nv_wr32(priv, 0x419c00, 0xa);
2677
2678 for (tpc = 0, id = 0; tpc < 4; tpc++) {
2679 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
2680 if (tpc < priv->tpc_nr[gpc]) {
2681 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0698), id);
2682 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x04e8), id);
2683 nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
2684 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0088), id++);
2685 }
2686
2687 nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
2688 nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
2689 }
2690 }
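	/* global TPC ids were handed out round-robin across GPCs (the outer
	 * loop walks the TPC slot, the inner loop the GPCs), so consecutive
	 * ids land on different GPCs; 0x0c08/0x0c8c simply get reprogrammed
	 * with the same per-GPC TPC count on every pass */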
2691
2692 tmp = 0;
2693 for (i = 0; i < priv->gpc_nr; i++)
2694 tmp |= priv->tpc_nr[i] << (i * 4);
2695 nv_wr32(priv, 0x406028, tmp);
2696 nv_wr32(priv, 0x405870, tmp);
2697
2698 nv_wr32(priv, 0x40602c, 0x0);
2699 nv_wr32(priv, 0x405874, 0x0);
2700 nv_wr32(priv, 0x406030, 0x0);
2701 nv_wr32(priv, 0x405878, 0x0);
2702 nv_wr32(priv, 0x406034, 0x0);
2703 nv_wr32(priv, 0x40587c, 0x0);
2704
2705 /* calculate first set of magics */
2706 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2707
2708 gpc = -1;
2709 for (tpc = 0; tpc < priv->tpc_total; tpc++) {
2710 do {
2711 gpc = (gpc + 1) % priv->gpc_nr;
2712 } while (!tpcnr[gpc]);
2713 tpcnr[gpc]--;
2714
2715 data[tpc / 6] |= gpc << ((tpc % 6) * 5);
2716 }
2717
2718 for (; tpc < 32; tpc++)
2719 data[tpc / 6] |= 7 << ((tpc % 6) * 5);
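	/* data[] now holds one 5-bit GPC index per TPC slot, six slots to a
	 * 32-bit word; slots beyond tpc_total are parked at 7, an
	 * out-of-range GPC index */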
2720
2721 /* and the second... */
2722 shift = 0;
2723 ntpcv = priv->tpc_total;
2724 while (!(ntpcv & (1 << 4))) {
2725 ntpcv <<= 1;
2726 shift++;
2727 }
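	/* normalise tpc_total into [16,31]: ntpcv is the shifted value and
	 * shift counts the doublings, i.e. a 5-bit mantissa/exponent pair
	 * feeding the modulo series below */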
2728
2729 data2[0] = ntpcv << 16;
2730 data2[0] |= shift << 21;
2731 data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
2732 data2[0] |= priv->tpc_total << 8;
2733 data2[0] |= priv->magic_not_rop_nr;
2734 for (i = 1; i < 7; i++)
2735 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
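	/* worked example: with tpc_total == 8 this gives shift == 1 and
	 * ntpcv == 16, so data2[0] == 0x00300800 | magic_not_rop_nr and
	 * every modulo term in data2[1] collapses to zero */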
2736
2737	/* and write it all to the various parts of PGRAPH */
2738 nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
2739 for (i = 0; i < 6; i++)
2740 nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
2741
2742 nv_wr32(priv, 0x41bfd0, data2[0]);
2743 nv_wr32(priv, 0x41bfe4, data2[1]);
2744 for (i = 0; i < 6; i++)
2745 nv_wr32(priv, 0x41bf00 + (i * 4), data[i]);
2746
2747 nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
2748 for (i = 0; i < 6; i++)
2749 nv_wr32(priv, 0x40780c + (i * 4), data[i]);
2750
2751
2752 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2753 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
2754 tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
2755
2756 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
2757 a = (i * (priv->tpc_total - 1)) / 32;
2758 if (a != b) {
2759 b = a;
2760 do {
2761 gpc = (gpc + 1) % priv->gpc_nr;
2762 } while (!tpcnr[gpc]);
2763 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
2764
2765 tpc_set |= 1 << ((gpc * 8) + tpc);
2766 }
2767
2768 nv_wr32(priv, 0x406800 + (i * 0x20), tpc_set);
2769 nv_wr32(priv, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
2770 }
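	/* each of the 32 slots receives the cumulative TPC membership mask
	 * built so far (one bit per gpc * 8 + tpc) plus its complement
	 * within tpc_mask, spreading the slots evenly over the TPCs */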
2771
2772 for (i = 0; i < 8; i++)
2773 nv_wr32(priv, 0x4064d0 + (i * 0x04), 0x00000000);
2774
2775 nv_wr32(priv, 0x405b00, 0x201);
2776 nv_wr32(priv, 0x408850, 0x2);
2777 nv_wr32(priv, 0x408958, 0x2);
2778 nv_wr32(priv, 0x419f78, 0xa);
2779
2780 nve0_grctx_generate_icmd(priv);
2781 nve0_grctx_generate_a097(priv);
2782 nve0_grctx_generate_902d(priv);
2783
2784 nv_mask(priv, 0x000260, 0x00000001, 0x00000001);
2785 nv_wr32(priv, 0x418800, 0x7026860a); //XXX
2786 nv_wr32(priv, 0x41be10, 0x00bb8bc7); //XXX
2787 return nvc0_grctx_fini(&info);
2788}
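
/* Editorial sketch, not part of the patch: the mmio_data()/mmio_list()
 * records built above are replayed when a channel's context image is
 * instanced.  The struct layout and function name here are hypothetical;
 * only the behaviour of OR-ing in a buffer address shifted right by
 * `shift` is implied by the (data, shift, buffer) triples used above.
 */
struct grctx_mmio {
	u32 addr;	/* PGRAPH register to initialise */
	u32 data;	/* canned value, e.g. 0x80000030 */
	u8 shift;	/* buffer GPU address is right-shifted by this */
	s8 buffer;	/* index of a buffer from mmio_data(), or -1 */
};

static void
grctx_mmio_exec(struct nvc0_graph_priv *priv,
		const struct grctx_mmio *list, int nr, const u64 *buf)
{
	int i;

	for (i = 0; i < nr; i++) {
		u32 data = list[i].data;

		/* e.g. mmio_list(0x40800c, 0x00000000, 8, 1) above would
		 * write buffer 1's address >> 8 into 0x40800c */
		if (list[i].buffer >= 0)
			data |= buf[list[i].buffer] >> list[i].shift;
		nv_wr32(priv, list[i].addr, data);
	}
}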
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
index 15272be33b66..b86cc60dcd56 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc
@@ -24,7 +24,7 @@
  */
 
 /* To build:
- * m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h
+ * m4 gpcnvc0.fuc | envyas -a -w -m fuc -V fuc3 -o gpcnvc0.fuc.h
  */
 
 /* TODO
@@ -33,7 +33,7 @@
  */
 
 .section #nvc0_grgpc_data
-include(`nvc0_graph.fuc')
+include(`nvc0.fuc')
 gpc_id:			.b32 0
 gpc_mmio_list_head:	.b32 0
 gpc_mmio_list_tail:	.b32 0
@@ -209,11 +209,11 @@ nvd9_tpc_mmio_tail:
 .section #nvc0_grgpc_code
 bra #init
 define(`include_code')
-include(`nvc0_graph.fuc')
+include(`nvc0.fuc')
 
 // reports an exception to the host
 //
-// In: $r15 error code (see nvc0_graph.fuc)
+// In: $r15 error code (see nvc0.fuc)
 //
 error:
 	push $r14
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
index a988b8ad00ac..96050ddb22ca 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnvc0.fuc.h
@@ -1,11 +1,19 @@
1uint32_t nvc0_grgpc_data[] = { 1uint32_t nvc0_grgpc_data[] = {
2/* 0x0000: gpc_id */
2 0x00000000, 3 0x00000000,
4/* 0x0004: gpc_mmio_list_head */
3 0x00000000, 5 0x00000000,
6/* 0x0008: gpc_mmio_list_tail */
4 0x00000000, 7 0x00000000,
8/* 0x000c: tpc_count */
5 0x00000000, 9 0x00000000,
10/* 0x0010: tpc_mask */
6 0x00000000, 11 0x00000000,
12/* 0x0014: tpc_mmio_list_head */
7 0x00000000, 13 0x00000000,
14/* 0x0018: tpc_mmio_list_tail */
8 0x00000000, 15 0x00000000,
16/* 0x001c: cmd_queue */
9 0x00000000, 17 0x00000000,
10 0x00000000, 18 0x00000000,
11 0x00000000, 19 0x00000000,
@@ -24,6 +32,7 @@ uint32_t nvc0_grgpc_data[] = {
24 0x00000000, 32 0x00000000,
25 0x00000000, 33 0x00000000,
26 0x00000000, 34 0x00000000,
35/* 0x0064: chipsets */
27 0x000000c0, 36 0x000000c0,
28 0x012800c8, 37 0x012800c8,
29 0x01e40194, 38 0x01e40194,
@@ -49,6 +58,7 @@ uint32_t nvc0_grgpc_data[] = {
49 0x0194012c, 58 0x0194012c,
50 0x025401f8, 59 0x025401f8,
51 0x00000000, 60 0x00000000,
61/* 0x00c8: nvc0_gpc_mmio_head */
52 0x00000380, 62 0x00000380,
53 0x14000400, 63 0x14000400,
54 0x20000450, 64 0x20000450,
@@ -73,7 +83,10 @@ uint32_t nvc0_grgpc_data[] = {
73 0x00000c8c, 83 0x00000c8c,
74 0x08001000, 84 0x08001000,
75 0x00001014, 85 0x00001014,
86/* 0x0128: nvc0_gpc_mmio_tail */
76 0x00000c6c, 87 0x00000c6c,
88/* 0x012c: nvc1_gpc_mmio_tail */
89/* 0x012c: nvd9_gpc_mmio_head */
77 0x00000380, 90 0x00000380,
78 0x04000400, 91 0x04000400,
79 0x0800040c, 92 0x0800040c,
@@ -100,6 +113,8 @@ uint32_t nvc0_grgpc_data[] = {
100 0x00000c8c, 113 0x00000c8c,
101 0x08001000, 114 0x08001000,
102 0x00001014, 115 0x00001014,
116/* 0x0194: nvd9_gpc_mmio_tail */
117/* 0x0194: nvc0_tpc_mmio_head */
103 0x00000018, 118 0x00000018,
104 0x0000003c, 119 0x0000003c,
105 0x00000048, 120 0x00000048,
@@ -120,11 +135,16 @@ uint32_t nvc0_grgpc_data[] = {
120 0x4c000644, 135 0x4c000644,
121 0x00000698, 136 0x00000698,
122 0x04000750, 137 0x04000750,
138/* 0x01e4: nvc0_tpc_mmio_tail */
123 0x00000758, 139 0x00000758,
124 0x000002c4, 140 0x000002c4,
125 0x000006e0, 141 0x000006e0,
142/* 0x01f0: nvcf_tpc_mmio_tail */
126 0x000004bc, 143 0x000004bc,
144/* 0x01f4: nvc3_tpc_mmio_tail */
127 0x00000544, 145 0x00000544,
146/* 0x01f8: nvc1_tpc_mmio_tail */
147/* 0x01f8: nvd9_tpc_mmio_head */
128 0x00000018, 148 0x00000018,
129 0x0000003c, 149 0x0000003c,
130 0x00000048, 150 0x00000048,
@@ -152,12 +172,14 @@ uint32_t nvc0_grgpc_data[] = {
152 172
153uint32_t nvc0_grgpc_code[] = { 173uint32_t nvc0_grgpc_code[] = {
154 0x03060ef5, 174 0x03060ef5,
175/* 0x0004: queue_put */
155 0x9800d898, 176 0x9800d898,
156 0x86f001d9, 177 0x86f001d9,
157 0x0489b808, 178 0x0489b808,
158 0xf00c1bf4, 179 0xf00c1bf4,
159 0x21f502f7, 180 0x21f502f7,
160 0x00f802ec, 181 0x00f802ec,
182/* 0x001c: queue_put_next */
161 0xb60798c4, 183 0xb60798c4,
162 0x8dbb0384, 184 0x8dbb0384,
163 0x0880b600, 185 0x0880b600,
@@ -165,6 +187,7 @@ uint32_t nvc0_grgpc_code[] = {
165 0x90b6018f, 187 0x90b6018f,
166 0x0f94f001, 188 0x0f94f001,
167 0xf801d980, 189 0xf801d980,
190/* 0x0039: queue_get */
168 0x0131f400, 191 0x0131f400,
169 0x9800d898, 192 0x9800d898,
170 0x89b801d9, 193 0x89b801d9,
@@ -176,37 +199,46 @@ uint32_t nvc0_grgpc_code[] = {
176 0x80b6019f, 199 0x80b6019f,
177 0x0f84f001, 200 0x0f84f001,
178 0xf400d880, 201 0xf400d880,
202/* 0x0066: queue_get_done */
179 0x00f80132, 203 0x00f80132,
204/* 0x0068: nv_rd32 */
180 0x0728b7f1, 205 0x0728b7f1,
181 0xb906b4b6, 206 0xb906b4b6,
182 0xc9f002ec, 207 0xc9f002ec,
183 0x00bcd01f, 208 0x00bcd01f,
209/* 0x0078: nv_rd32_wait */
184 0xc800bccf, 210 0xc800bccf,
185 0x1bf41fcc, 211 0x1bf41fcc,
186 0x06a7f0fa, 212 0x06a7f0fa,
187 0x010321f5, 213 0x010321f5,
188 0xf840bfcf, 214 0xf840bfcf,
215/* 0x008d: nv_wr32 */
189 0x28b7f100, 216 0x28b7f100,
190 0x06b4b607, 217 0x06b4b607,
191 0xb980bfd0, 218 0xb980bfd0,
192 0xc9f002ec, 219 0xc9f002ec,
193 0x1ec9f01f, 220 0x1ec9f01f,
221/* 0x00a3: nv_wr32_wait */
194 0xcf00bcd0, 222 0xcf00bcd0,
195 0xccc800bc, 223 0xccc800bc,
196 0xfa1bf41f, 224 0xfa1bf41f,
225/* 0x00ae: watchdog_reset */
197 0x87f100f8, 226 0x87f100f8,
198 0x84b60430, 227 0x84b60430,
199 0x1ff9f006, 228 0x1ff9f006,
200 0xf8008fd0, 229 0xf8008fd0,
230/* 0x00bd: watchdog_clear */
201 0x3087f100, 231 0x3087f100,
202 0x0684b604, 232 0x0684b604,
203 0xf80080d0, 233 0xf80080d0,
234/* 0x00c9: wait_donez */
204 0x3c87f100, 235 0x3c87f100,
205 0x0684b608, 236 0x0684b608,
206 0x99f094bd, 237 0x99f094bd,
207 0x0089d000, 238 0x0089d000,
208 0x081887f1, 239 0x081887f1,
209 0xd00684b6, 240 0xd00684b6,
241/* 0x00e2: wait_done_wait_donez */
210 0x87f1008a, 242 0x87f1008a,
211 0x84b60400, 243 0x84b60400,
212 0x0088cf06, 244 0x0088cf06,
@@ -215,6 +247,7 @@ uint32_t nvc0_grgpc_code[] = {
215 0x84b6085c, 247 0x84b6085c,
216 0xf094bd06, 248 0xf094bd06,
217 0x89d00099, 249 0x89d00099,
250/* 0x0103: wait_doneo */
218 0xf100f800, 251 0xf100f800,
219 0xb6083c87, 252 0xb6083c87,
220 0x94bd0684, 253 0x94bd0684,
@@ -222,6 +255,7 @@ uint32_t nvc0_grgpc_code[] = {
222 0x87f10089, 255 0x87f10089,
223 0x84b60818, 256 0x84b60818,
224 0x008ad006, 257 0x008ad006,
258/* 0x011c: wait_done_wait_doneo */
225 0x040087f1, 259 0x040087f1,
226 0xcf0684b6, 260 0xcf0684b6,
227 0x8aff0088, 261 0x8aff0088,
@@ -230,6 +264,8 @@ uint32_t nvc0_grgpc_code[] = {
230 0xbd0684b6, 264 0xbd0684b6,
231 0x0099f094, 265 0x0099f094,
232 0xf80089d0, 266 0xf80089d0,
267/* 0x013d: mmctx_size */
268/* 0x013f: nv_mmctx_size_loop */
233 0x9894bd00, 269 0x9894bd00,
234 0x85b600e8, 270 0x85b600e8,
235 0x0180b61a, 271 0x0180b61a,
@@ -238,6 +274,7 @@ uint32_t nvc0_grgpc_code[] = {
238 0x04efb804, 274 0x04efb804,
239 0xb9eb1bf4, 275 0xb9eb1bf4,
240 0x00f8029f, 276 0x00f8029f,
277/* 0x015c: mmctx_xfer */
241 0x083c87f1, 278 0x083c87f1,
242 0xbd0684b6, 279 0xbd0684b6,
243 0x0199f094, 280 0x0199f094,
@@ -247,9 +284,11 @@ uint32_t nvc0_grgpc_code[] = {
247 0xf405bbfd, 284 0xf405bbfd,
248 0x8bd0090b, 285 0x8bd0090b,
249 0x0099f000, 286 0x0099f000,
287/* 0x0180: mmctx_base_disabled */
250 0xf405eefd, 288 0xf405eefd,
251 0x8ed00c0b, 289 0x8ed00c0b,
252 0xc08fd080, 290 0xc08fd080,
291/* 0x018f: mmctx_multi_disabled */
253 0xb70199f0, 292 0xb70199f0,
254 0xc8010080, 293 0xc8010080,
255 0xb4b600ab, 294 0xb4b600ab,
@@ -257,6 +296,8 @@ uint32_t nvc0_grgpc_code[] = {
257 0xb601aec8, 296 0xb601aec8,
258 0xbefd11e4, 297 0xbefd11e4,
259 0x008bd005, 298 0x008bd005,
299/* 0x01a8: mmctx_exec_loop */
300/* 0x01a8: mmctx_wait_free */
260 0xf0008ecf, 301 0xf0008ecf,
261 0x0bf41fe4, 302 0x0bf41fe4,
262 0x00ce98fa, 303 0x00ce98fa,
@@ -265,34 +306,42 @@ uint32_t nvc0_grgpc_code[] = {
265 0x04cdb804, 306 0x04cdb804,
266 0xc8e81bf4, 307 0xc8e81bf4,
267 0x1bf402ab, 308 0x1bf402ab,
309/* 0x01c9: mmctx_fini_wait */
268 0x008bcf18, 310 0x008bcf18,
269 0xb01fb4f0, 311 0xb01fb4f0,
270 0x1bf410b4, 312 0x1bf410b4,
271 0x02a7f0f7, 313 0x02a7f0f7,
272 0xf4c921f4, 314 0xf4c921f4,
315/* 0x01de: mmctx_stop */
273 0xabc81b0e, 316 0xabc81b0e,
274 0x10b4b600, 317 0x10b4b600,
275 0xf00cb9f0, 318 0xf00cb9f0,
276 0x8bd012b9, 319 0x8bd012b9,
320/* 0x01ed: mmctx_stop_wait */
277 0x008bcf00, 321 0x008bcf00,
278 0xf412bbc8, 322 0xf412bbc8,
323/* 0x01f6: mmctx_done */
279 0x87f1fa1b, 324 0x87f1fa1b,
280 0x84b6085c, 325 0x84b6085c,
281 0xf094bd06, 326 0xf094bd06,
282 0x89d00199, 327 0x89d00199,
328/* 0x0207: strand_wait */
283 0xf900f800, 329 0xf900f800,
284 0x02a7f0a0, 330 0x02a7f0a0,
285 0xfcc921f4, 331 0xfcc921f4,
332/* 0x0213: strand_pre */
286 0xf100f8a0, 333 0xf100f8a0,
287 0xf04afc87, 334 0xf04afc87,
288 0x97f00283, 335 0x97f00283,
289 0x0089d00c, 336 0x0089d00c,
290 0x020721f5, 337 0x020721f5,
338/* 0x0226: strand_post */
291 0x87f100f8, 339 0x87f100f8,
292 0x83f04afc, 340 0x83f04afc,
293 0x0d97f002, 341 0x0d97f002,
294 0xf50089d0, 342 0xf50089d0,
295 0xf8020721, 343 0xf8020721,
344/* 0x0239: strand_set */
296 0xfca7f100, 345 0xfca7f100,
297 0x02a3f04f, 346 0x02a3f04f,
298 0x0500aba2, 347 0x0500aba2,
@@ -303,6 +352,7 @@ uint32_t nvc0_grgpc_code[] = {
303 0xf000aed0, 352 0xf000aed0,
304 0xbcd00ac7, 353 0xbcd00ac7,
305 0x0721f500, 354 0x0721f500,
355/* 0x0263: strand_ctx_init */
306 0xf100f802, 356 0xf100f802,
307 0xb6083c87, 357 0xb6083c87,
308 0x94bd0684, 358 0x94bd0684,
@@ -325,6 +375,7 @@ uint32_t nvc0_grgpc_code[] = {
325 0x0684b608, 375 0x0684b608,
326 0xb70089cf, 376 0xb70089cf,
327 0x95220080, 377 0x95220080,
378/* 0x02ba: ctx_init_strand_loop */
328 0x8ed008fe, 379 0x8ed008fe,
329 0x408ed000, 380 0x408ed000,
330 0xb6808acf, 381 0xb6808acf,
@@ -338,12 +389,14 @@ uint32_t nvc0_grgpc_code[] = {
338 0x94bd0684, 389 0x94bd0684,
339 0xd00399f0, 390 0xd00399f0,
340 0x00f80089, 391 0x00f80089,
392/* 0x02ec: error */
341 0xe7f1e0f9, 393 0xe7f1e0f9,
342 0xe3f09814, 394 0xe3f09814,
343 0x8d21f440, 395 0x8d21f440,
344 0x041ce0b7, 396 0x041ce0b7,
345 0xf401f7f0, 397 0xf401f7f0,
346 0xe0fc8d21, 398 0xe0fc8d21,
399/* 0x0306: init */
347 0x04bd00f8, 400 0x04bd00f8,
348 0xf10004fe, 401 0xf10004fe,
349 0xf0120017, 402 0xf0120017,
@@ -366,11 +419,13 @@ uint32_t nvc0_grgpc_code[] = {
366 0x27f10002, 419 0x27f10002,
367 0x24b60800, 420 0x24b60800,
368 0x0022cf06, 421 0x0022cf06,
422/* 0x035f: init_find_chipset */
369 0xb65817f0, 423 0xb65817f0,
370 0x13980c10, 424 0x13980c10,
371 0x0432b800, 425 0x0432b800,
372 0xb00b0bf4, 426 0xb00b0bf4,
373 0x1bf40034, 427 0x1bf40034,
428/* 0x0373: init_context */
374 0xf100f8f1, 429 0xf100f8f1,
375 0xb6080027, 430 0xb6080027,
376 0x22cf0624, 431 0x22cf0624,
@@ -407,6 +462,7 @@ uint32_t nvc0_grgpc_code[] = {
407 0x0010b740, 462 0x0010b740,
408 0xf024bd08, 463 0xf024bd08,
409 0x12d01f29, 464 0x12d01f29,
465/* 0x0401: main */
410 0x0031f400, 466 0x0031f400,
411 0xf00028f4, 467 0xf00028f4,
412 0x21f41cd7, 468 0x21f41cd7,
@@ -419,9 +475,11 @@ uint32_t nvc0_grgpc_code[] = {
419 0xfe051efd, 475 0xfe051efd,
420 0x21f50018, 476 0x21f50018,
421 0x0ef404c3, 477 0x0ef404c3,
478/* 0x0431: main_not_ctx_xfer */
422 0x10ef94d3, 479 0x10ef94d3,
423 0xf501f5f0, 480 0xf501f5f0,
424 0xf402ec21, 481 0xf402ec21,
482/* 0x043e: ih */
425 0x80f9c60e, 483 0x80f9c60e,
426 0xf90188fe, 484 0xf90188fe,
427 0xf990f980, 485 0xf990f980,
@@ -436,30 +494,36 @@ uint32_t nvc0_grgpc_code[] = {
436 0xb0b70421, 494 0xb0b70421,
437 0xe7f00400, 495 0xe7f00400,
438 0x00bed001, 496 0x00bed001,
497/* 0x0474: ih_no_fifo */
439 0xfc400ad0, 498 0xfc400ad0,
440 0xfce0fcf0, 499 0xfce0fcf0,
441 0xfcb0fcd0, 500 0xfcb0fcd0,
442 0xfc90fca0, 501 0xfc90fca0,
443 0x0088fe80, 502 0x0088fe80,
444 0x32f480fc, 503 0x32f480fc,
504/* 0x048f: hub_barrier_done */
445 0xf001f800, 505 0xf001f800,
446 0x0e9801f7, 506 0x0e9801f7,
447 0x04febb00, 507 0x04febb00,
448 0x9418e7f1, 508 0x9418e7f1,
449 0xf440e3f0, 509 0xf440e3f0,
450 0x00f88d21, 510 0x00f88d21,
511/* 0x04a4: ctx_redswitch */
451 0x0614e7f1, 512 0x0614e7f1,
452 0xf006e4b6, 513 0xf006e4b6,
453 0xefd020f7, 514 0xefd020f7,
454 0x08f7f000, 515 0x08f7f000,
516/* 0x04b4: ctx_redswitch_delay */
455 0xf401f2b6, 517 0xf401f2b6,
456 0xf7f1fd1b, 518 0xf7f1fd1b,
457 0xefd00a20, 519 0xefd00a20,
520/* 0x04c3: ctx_xfer */
458 0xf100f800, 521 0xf100f800,
459 0xb60a0417, 522 0xb60a0417,
460 0x1fd00614, 523 0x1fd00614,
461 0x0711f400, 524 0x0711f400,
462 0x04a421f5, 525 0x04a421f5,
526/* 0x04d4: ctx_xfer_not_load */
463 0x4afc17f1, 527 0x4afc17f1,
464 0xf00213f0, 528 0xf00213f0,
465 0x12d00c27, 529 0x12d00c27,
@@ -489,11 +553,13 @@ uint32_t nvc0_grgpc_code[] = {
489 0x5c21f508, 553 0x5c21f508,
490 0x0721f501, 554 0x0721f501,
491 0x0601f402, 555 0x0601f402,
556/* 0x054b: ctx_xfer_post */
492 0xf11412f4, 557 0xf11412f4,
493 0xf04afc17, 558 0xf04afc17,
494 0x27f00213, 559 0x27f00213,
495 0x0012d00d, 560 0x0012d00d,
496 0x020721f5, 561 0x020721f5,
562/* 0x055c: ctx_xfer_done */
497 0x048f21f5, 563 0x048f21f5,
498 0x000000f8, 564 0x000000f8,
499 0x00000000, 565 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
new file mode 100644
index 000000000000..7b715fda2763
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
@@ -0,0 +1,451 @@
1/* fuc microcode for nve0 PGRAPH/GPC
2 *
3 * Copyright 2011 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Ben Skeggs
24 */
25
26/* To build:
27 * m4 gpcnve0.fuc | envyas -a -w -m fuc -V fuc3 -o gpcnve0.fuc.h
28 */
29
30/* TODO
31 * - bracket certain functions with scratch writes, useful for debugging
32 * - watchdog timer around ctx operations
33 */
34
35.section #nve0_grgpc_data
36include(`nve0.fuc')
37gpc_id: .b32 0
38gpc_mmio_list_head: .b32 0
39gpc_mmio_list_tail: .b32 0
40
41tpc_count: .b32 0
42tpc_mask: .b32 0
43tpc_mmio_list_head: .b32 0
44tpc_mmio_list_tail: .b32 0
45
46cmd_queue: queue_init
47
48// chipset descriptions
49chipsets:
50.b8 0xe4 0 0 0
51.b16 #nve4_gpc_mmio_head
52.b16 #nve4_gpc_mmio_tail
53.b16 #nve4_tpc_mmio_head
54.b16 #nve4_tpc_mmio_tail
55.b8 0xe7 0 0 0
56.b16 #nve4_gpc_mmio_head
57.b16 #nve4_gpc_mmio_tail
58.b16 #nve4_tpc_mmio_head
59.b16 #nve4_tpc_mmio_tail
60.b8 0 0 0 0
61
62// GPC mmio lists
63nve4_gpc_mmio_head:
64mmctx_data(0x000380, 1)
65mmctx_data(0x000400, 2)
66mmctx_data(0x00040c, 3)
67mmctx_data(0x000450, 9)
68mmctx_data(0x000600, 1)
69mmctx_data(0x000684, 1)
70mmctx_data(0x000700, 5)
71mmctx_data(0x000800, 1)
72mmctx_data(0x000808, 3)
73mmctx_data(0x000828, 1)
74mmctx_data(0x000830, 1)
75mmctx_data(0x0008d8, 1)
76mmctx_data(0x0008e0, 1)
77mmctx_data(0x0008e8, 6)
78mmctx_data(0x00091c, 1)
79mmctx_data(0x000924, 3)
80mmctx_data(0x000b00, 1)
81mmctx_data(0x000b08, 6)
82mmctx_data(0x000bb8, 1)
83mmctx_data(0x000c08, 1)
84mmctx_data(0x000c10, 8)
85mmctx_data(0x000c40, 1)
86mmctx_data(0x000c6c, 1)
87mmctx_data(0x000c80, 1)
88mmctx_data(0x000c8c, 1)
89mmctx_data(0x001000, 3)
90mmctx_data(0x001014, 1)
91mmctx_data(0x003024, 1)
92mmctx_data(0x0030c0, 2)
93mmctx_data(0x0030e4, 1)
94mmctx_data(0x003100, 6)
95mmctx_data(0x0031d0, 1)
96mmctx_data(0x0031e0, 2)
97nve4_gpc_mmio_tail:
98
99// TPC mmio lists
100nve4_tpc_mmio_head:
101mmctx_data(0x000048, 1)
102mmctx_data(0x000064, 1)
103mmctx_data(0x000088, 1)
104mmctx_data(0x000200, 6)
105mmctx_data(0x00021c, 2)
106mmctx_data(0x000230, 1)
107mmctx_data(0x0002c4, 1)
108mmctx_data(0x000400, 3)
109mmctx_data(0x000420, 3)
110mmctx_data(0x0004e8, 1)
111mmctx_data(0x0004f4, 1)
112mmctx_data(0x000604, 4)
113mmctx_data(0x000644, 22)
114mmctx_data(0x0006ac, 2)
115mmctx_data(0x0006c8, 1)
116mmctx_data(0x000730, 8)
117mmctx_data(0x000758, 1)
118mmctx_data(0x000778, 1)
119nve4_tpc_mmio_tail:
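// (layout note: mmctx_data(addr, count) packs (count - 1) into the bits
//  above the register address; mmctx_data(0x000450, 9), for instance,
//  assembles to 0x20000450 in the generated gpcnve0.fuc.h further down)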
120
121.section #nve0_grgpc_code
122bra #init
123define(`include_code')
124include(`nve0.fuc')
125
126// reports an exception to the host
127//
128// In: $r15 error code (see nve0.fuc)
129//
130error:
131 push $r14
132 mov $r14 -0x67ec // 0x9814
133 sethi $r14 0x400000
134 call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
135 add b32 $r14 0x41c
136 mov $r15 1
137 call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET
138 pop $r14
139 ret
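// (the mov/sethi pair above is the usual fuc idiom for forming a full
//  MMIO address: the 16-bit immediate -0x67ec sign-extends to 0xffff9814,
//  then sethi overwrites the high bits, giving 0x00409814)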
140
141// GPC fuc initialisation, executed by triggering ucode start, will
142// fall through to main loop after completion.
143//
144// Input:
145// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
146// CC_SCRATCH[1]: context base
147//
148// Output:
149// CC_SCRATCH[0]:
150// 31:31: set to signal completion
151// CC_SCRATCH[1]:
152// 31:0: GPC context size
153//
154init:
155 clear b32 $r0
156 mov $sp $r0
157
158 // enable fifo access
159 mov $r1 0x1200
160 mov $r2 2
161 iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
162
163 // setup i0 handler, and route all interrupts to it
164 mov $r1 #ih
165 mov $iv0 $r1
166 mov $r1 0x400
167 iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
168
169 // enable fifo interrupt
170 mov $r2 4
171 iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
172
173 // enable interrupts
174 bset $flags ie0
175
176 // figure out which GPC we are, and how many TPCs we have
177 mov $r1 0x608
178 shl b32 $r1 6
179 iord $r2 I[$r1 + 0x000] // UNITS
180 mov $r3 1
181 and $r2 0x1f
182 shl b32 $r3 $r2
183 sub b32 $r3 1
184 st b32 D[$r0 + #tpc_count] $r2
185 st b32 D[$r0 + #tpc_mask] $r3
186 add b32 $r1 0x400
187 iord $r2 I[$r1 + 0x000] // MYINDEX
188 st b32 D[$r0 + #gpc_id] $r2
189
190 // find context data for this chipset
191 mov $r2 0x800
192 shl b32 $r2 6
193 iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
194 mov $r1 #chipsets - 12
195 init_find_chipset:
196 add b32 $r1 12
197 ld b32 $r3 D[$r1 + 0x00]
198 cmpu b32 $r3 $r2
199 bra e #init_context
200 cmpu b32 $r3 0
201 bra ne #init_find_chipset
202 // unknown chipset
203 ret
204
205 // initialise context base, and size tracking
206 init_context:
207 mov $r2 0x800
208 shl b32 $r2 6
209 iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base
210 clear b32 $r3 // track GPC context size here
211
212 // set mmctx base addresses now so we don't have to do it later,
213 // they don't currently ever change
214 mov $r4 0x700
215 shl b32 $r4 6
216 shr b32 $r5 $r2 8
217 iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE
218 iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE
219
220 // calculate GPC mmio context size, store the chipset-specific
221 // mmio list pointers somewhere we can get at them later without
222 // re-parsing the chipset list
223 clear b32 $r14
224 clear b32 $r15
225 ld b16 $r14 D[$r1 + 4]
226 ld b16 $r15 D[$r1 + 6]
227 st b16 D[$r0 + #gpc_mmio_list_head] $r14
228 st b16 D[$r0 + #gpc_mmio_list_tail] $r15
229 call #mmctx_size
230 add b32 $r2 $r15
231 add b32 $r3 $r15
232
233 // calculate per-TPC mmio context size, store the list pointers
234 ld b16 $r14 D[$r1 + 8]
235 ld b16 $r15 D[$r1 + 10]
236 st b16 D[$r0 + #tpc_mmio_list_head] $r14
237 st b16 D[$r0 + #tpc_mmio_list_tail] $r15
238 call #mmctx_size
239 ld b32 $r14 D[$r0 + #tpc_count]
240 mulu $r14 $r15
241 add b32 $r2 $r14
242 add b32 $r3 $r14
243
244 // round up base/size to 256 byte boundary (for strand SWBASE)
245 add b32 $r4 0x1300
246 shr b32 $r3 2
247 iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!?
248 shr b32 $r2 8
249 shr b32 $r3 6
250 add b32 $r2 1
251 add b32 $r3 1
252 shl b32 $r2 8
253 shl b32 $r3 8
254
255 // calculate size of strand context data
256 mov b32 $r15 $r2
257 call #strand_ctx_init
258 add b32 $r3 $r15
259
260 // save context size, and tell HUB we're done
261 mov $r1 0x800
262 shl b32 $r1 6
263 iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size
264 add b32 $r1 0x800
265 clear b32 $r2
266 bset $r2 31
267 iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
268
269// Main program loop, very simple, sleeps until woken up by the interrupt
270// handler, pulls a command from the queue and executes its handler
271//
272main:
273 bset $flags $p0
274 sleep $p0
275 mov $r13 #cmd_queue
276 call #queue_get
277 bra $p1 #main
278
279 // 0x0000-0x0003 are all context transfers
280 cmpu b32 $r14 0x04
281 bra nc #main_not_ctx_xfer
282 // fetch $flags and mask off $p1/$p2
283 mov $r1 $flags
284 mov $r2 0x0006
285 not b32 $r2
286 and $r1 $r2
287 // set $p1/$p2 according to transfer type
288 shl b32 $r14 1
289 or $r1 $r14
290 mov $flags $r1
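	// (the two low command bits now sit in $flags bits 1:2, i.e. $p1 =
	//  save/load direction and $p2 = "the opposite transfer was or will
	//  be done", exactly the contract ctx_xfer documents below)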
291 // transfer context data
292 call #ctx_xfer
293 bra #main
294
295 main_not_ctx_xfer:
296 shl b32 $r15 $r14 16
297 or $r15 E_BAD_COMMAND
298 call #error
299 bra #main
300
301// interrupt handler
302ih:
303 push $r8
304 mov $r8 $flags
305 push $r8
306 push $r9
307 push $r10
308 push $r11
309 push $r13
310 push $r14
311 push $r15
312
313 // incoming fifo command?
314 iord $r10 I[$r0 + 0x200] // INTR
315 and $r11 $r10 0x00000004
316 bra e #ih_no_fifo
317 // queue incoming fifo command for later processing
318 mov $r11 0x1900
319 mov $r13 #cmd_queue
320 iord $r14 I[$r11 + 0x100] // FIFO_CMD
321 iord $r15 I[$r11 + 0x000] // FIFO_DATA
322 call #queue_put
323 add b32 $r11 0x400
324 mov $r14 1
325 iowr I[$r11 + 0x000] $r14 // FIFO_ACK
326
327 // ack, and wake up main()
328 ih_no_fifo:
329 iowr I[$r0 + 0x100] $r10 // INTR_ACK
330
331 pop $r15
332 pop $r14
333 pop $r13
334 pop $r11
335 pop $r10
336 pop $r9
337 pop $r8
338 mov $flags $r8
339 pop $r8
340 bclr $flags $p0
341 iret
342
343// Set this GPC's bit in HUB_BAR, used to signal completion of various
344// activities to the HUB fuc
345//
346hub_barrier_done:
347 mov $r15 1
348 ld b32 $r14 D[$r0 + #gpc_id]
349 shl b32 $r15 $r14
350 mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
351 sethi $r14 0x400000
352 call #nv_wr32
353 ret
354
355// Disables various things, waits a bit, and re-enables them.
356//
357// Not sure how exactly this helps; perhaps "ENABLE" is not such a
358// good description for the bits we turn off? Anyway, without this,
359// funny things happen.
360//
361ctx_redswitch:
362 mov $r14 0x614
363 shl b32 $r14 6
364 mov $r15 0x020
365 iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER
366 mov $r15 8
367 ctx_redswitch_delay:
368 sub b32 $r15 1
369 bra ne #ctx_redswitch_delay
370 mov $r15 0xa20
371 iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
372 ret
373
374// Transfer GPC context data between GPU and storage area
375//
376// In: $r15 context base address
377// $p1 clear on save, set on load
378// $p2 set if opposite direction done/will be done, so:
379// on save it means: "a load will follow this save"
380// on load it means: "a save preceded this load"
381//
382ctx_xfer:
383 // set context base address
384 mov $r1 0xa04
385 shl b32 $r1 6
386 iowr I[$r1 + 0x000] $r15// MEM_BASE
387 bra not $p1 #ctx_xfer_not_load
388 call #ctx_redswitch
389 ctx_xfer_not_load:
390
391 // strands
392 mov $r1 0x4afc
393 sethi $r1 0x20000
394 mov $r2 0xc
395 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
396 call #strand_wait
397 mov $r2 0x47fc
398 sethi $r2 0x20000
399 iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
400 xbit $r2 $flags $p1
401 add b32 $r2 3
402 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
403
404 // mmio context
405 xbit $r10 $flags $p1 // direction
406 or $r10 2 // first
407 mov $r11 0x0000
408 sethi $r11 0x500000
409 ld b32 $r12 D[$r0 + #gpc_id]
410 shl b32 $r12 15
411 add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
412 ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
413 ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
414 mov $r14 0 // not multi
415 call #mmctx_xfer
416
417 // per-TPC mmio context
418 xbit $r10 $flags $p1 // direction
419 or $r10 4 // last
420 mov $r11 0x4000
421 sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
422 ld b32 $r12 D[$r0 + #gpc_id]
423 shl b32 $r12 15
424 add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
425 ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
426 ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
427 ld b32 $r15 D[$r0 + #tpc_mask]
428 mov $r14 0x800 // stride = 0x800
429 call #mmctx_xfer
430
431 // wait for strands to finish
432 call #strand_wait
433
434 // if load, or a save without a load following, do some
435 // unknown stuff that's done after finishing a block of
436 // strand commands
437 bra $p1 #ctx_xfer_post
438 bra not $p2 #ctx_xfer_done
439 ctx_xfer_post:
440 mov $r1 0x4afc
441 sethi $r1 0x20000
442 mov $r2 0xd
443 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
444 call #strand_wait
445
446 // mark completion in HUB's barrier
447 ctx_xfer_done:
448 call #hub_barrier_done
449 ret
450
451.align 256
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
new file mode 100644
index 000000000000..26c2165bad0f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -0,0 +1,530 @@
1uint32_t nve0_grgpc_data[] = {
2/* 0x0000: gpc_id */
3 0x00000000,
4/* 0x0004: gpc_mmio_list_head */
5 0x00000000,
6/* 0x0008: gpc_mmio_list_tail */
7 0x00000000,
8/* 0x000c: tpc_count */
9 0x00000000,
10/* 0x0010: tpc_mask */
11 0x00000000,
12/* 0x0014: tpc_mmio_list_head */
13 0x00000000,
14/* 0x0018: tpc_mmio_list_tail */
15 0x00000000,
16/* 0x001c: cmd_queue */
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25 0x00000000,
26 0x00000000,
27 0x00000000,
28 0x00000000,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x00000000,
34 0x00000000,
35/* 0x0064: chipsets */
36 0x000000e4,
37 0x01040080,
38 0x014c0104,
39 0x000000e7,
40 0x01040080,
41 0x014c0104,
42 0x00000000,
43/* 0x0080: nve4_gpc_mmio_head */
44 0x00000380,
45 0x04000400,
46 0x0800040c,
47 0x20000450,
48 0x00000600,
49 0x00000684,
50 0x10000700,
51 0x00000800,
52 0x08000808,
53 0x00000828,
54 0x00000830,
55 0x000008d8,
56 0x000008e0,
57 0x140008e8,
58 0x0000091c,
59 0x08000924,
60 0x00000b00,
61 0x14000b08,
62 0x00000bb8,
63 0x00000c08,
64 0x1c000c10,
65 0x00000c40,
66 0x00000c6c,
67 0x00000c80,
68 0x00000c8c,
69 0x08001000,
70 0x00001014,
71 0x00003024,
72 0x040030c0,
73 0x000030e4,
74 0x14003100,
75 0x000031d0,
76 0x040031e0,
77/* 0x0104: nve4_gpc_mmio_tail */
78/* 0x0104: nve4_tpc_mmio_head */
79 0x00000048,
80 0x00000064,
81 0x00000088,
82 0x14000200,
83 0x0400021c,
84 0x00000230,
85 0x000002c4,
86 0x08000400,
87 0x08000420,
88 0x000004e8,
89 0x000004f4,
90 0x0c000604,
91 0x54000644,
92 0x040006ac,
93 0x000006c8,
94 0x1c000730,
95 0x00000758,
96 0x00000778,
97};
98
99uint32_t nve0_grgpc_code[] = {
100 0x03060ef5,
101/* 0x0004: queue_put */
102 0x9800d898,
103 0x86f001d9,
104 0x0489b808,
105 0xf00c1bf4,
106 0x21f502f7,
107 0x00f802ec,
108/* 0x001c: queue_put_next */
109 0xb60798c4,
110 0x8dbb0384,
111 0x0880b600,
112 0x80008e80,
113 0x90b6018f,
114 0x0f94f001,
115 0xf801d980,
116/* 0x0039: queue_get */
117 0x0131f400,
118 0x9800d898,
119 0x89b801d9,
120 0x210bf404,
121 0xb60789c4,
122 0x9dbb0394,
123 0x0890b600,
124 0x98009e98,
125 0x80b6019f,
126 0x0f84f001,
127 0xf400d880,
128/* 0x0066: queue_get_done */
129 0x00f80132,
130/* 0x0068: nv_rd32 */
131 0x0728b7f1,
132 0xb906b4b6,
133 0xc9f002ec,
134 0x00bcd01f,
135/* 0x0078: nv_rd32_wait */
136 0xc800bccf,
137 0x1bf41fcc,
138 0x06a7f0fa,
139 0x010321f5,
140 0xf840bfcf,
141/* 0x008d: nv_wr32 */
142 0x28b7f100,
143 0x06b4b607,
144 0xb980bfd0,
145 0xc9f002ec,
146 0x1ec9f01f,
147/* 0x00a3: nv_wr32_wait */
148 0xcf00bcd0,
149 0xccc800bc,
150 0xfa1bf41f,
151/* 0x00ae: watchdog_reset */
152 0x87f100f8,
153 0x84b60430,
154 0x1ff9f006,
155 0xf8008fd0,
156/* 0x00bd: watchdog_clear */
157 0x3087f100,
158 0x0684b604,
159 0xf80080d0,
160/* 0x00c9: wait_donez */
161 0x3c87f100,
162 0x0684b608,
163 0x99f094bd,
164 0x0089d000,
165 0x081887f1,
166 0xd00684b6,
167/* 0x00e2: wait_done_wait_donez */
168 0x87f1008a,
169 0x84b60400,
170 0x0088cf06,
171 0xf4888aff,
172 0x87f1f31b,
173 0x84b6085c,
174 0xf094bd06,
175 0x89d00099,
176/* 0x0103: wait_doneo */
177 0xf100f800,
178 0xb6083c87,
179 0x94bd0684,
180 0xd00099f0,
181 0x87f10089,
182 0x84b60818,
183 0x008ad006,
184/* 0x011c: wait_done_wait_doneo */
185 0x040087f1,
186 0xcf0684b6,
187 0x8aff0088,
188 0xf30bf488,
189 0x085c87f1,
190 0xbd0684b6,
191 0x0099f094,
192 0xf80089d0,
193/* 0x013d: mmctx_size */
194/* 0x013f: nv_mmctx_size_loop */
195 0x9894bd00,
196 0x85b600e8,
197 0x0180b61a,
198 0xbb0284b6,
199 0xe0b60098,
200 0x04efb804,
201 0xb9eb1bf4,
202 0x00f8029f,
203/* 0x015c: mmctx_xfer */
204 0x083c87f1,
205 0xbd0684b6,
206 0x0199f094,
207 0xf10089d0,
208 0xb6071087,
209 0x94bd0684,
210 0xf405bbfd,
211 0x8bd0090b,
212 0x0099f000,
213/* 0x0180: mmctx_base_disabled */
214 0xf405eefd,
215 0x8ed00c0b,
216 0xc08fd080,
217/* 0x018f: mmctx_multi_disabled */
218 0xb70199f0,
219 0xc8010080,
220 0xb4b600ab,
221 0x0cb9f010,
222 0xb601aec8,
223 0xbefd11e4,
224 0x008bd005,
225/* 0x01a8: mmctx_exec_loop */
226/* 0x01a8: mmctx_wait_free */
227 0xf0008ecf,
228 0x0bf41fe4,
229 0x00ce98fa,
230 0xd005e9fd,
231 0xc0b6c08e,
232 0x04cdb804,
233 0xc8e81bf4,
234 0x1bf402ab,
235/* 0x01c9: mmctx_fini_wait */
236 0x008bcf18,
237 0xb01fb4f0,
238 0x1bf410b4,
239 0x02a7f0f7,
240 0xf4c921f4,
241/* 0x01de: mmctx_stop */
242 0xabc81b0e,
243 0x10b4b600,
244 0xf00cb9f0,
245 0x8bd012b9,
246/* 0x01ed: mmctx_stop_wait */
247 0x008bcf00,
248 0xf412bbc8,
249/* 0x01f6: mmctx_done */
250 0x87f1fa1b,
251 0x84b6085c,
252 0xf094bd06,
253 0x89d00199,
254/* 0x0207: strand_wait */
255 0xf900f800,
256 0x02a7f0a0,
257 0xfcc921f4,
258/* 0x0213: strand_pre */
259 0xf100f8a0,
260 0xf04afc87,
261 0x97f00283,
262 0x0089d00c,
263 0x020721f5,
264/* 0x0226: strand_post */
265 0x87f100f8,
266 0x83f04afc,
267 0x0d97f002,
268 0xf50089d0,
269 0xf8020721,
270/* 0x0239: strand_set */
271 0xfca7f100,
272 0x02a3f04f,
273 0x0500aba2,
274 0xd00fc7f0,
275 0xc7f000ac,
276 0x00bcd00b,
277 0x020721f5,
278 0xf000aed0,
279 0xbcd00ac7,
280 0x0721f500,
281/* 0x0263: strand_ctx_init */
282 0xf100f802,
283 0xb6083c87,
284 0x94bd0684,
285 0xd00399f0,
286 0x21f50089,
287 0xe7f00213,
288 0x3921f503,
289 0xfca7f102,
290 0x02a3f046,
291 0x0400aba0,
292 0xf040a0d0,
293 0xbcd001c7,
294 0x0721f500,
295 0x010c9202,
296 0xf000acd0,
297 0xbcd002c7,
298 0x0721f500,
299 0x2621f502,
300 0x8087f102,
301 0x0684b608,
302 0xb70089cf,
303 0x95220080,
304/* 0x02ba: ctx_init_strand_loop */
305 0x8ed008fe,
306 0x408ed000,
307 0xb6808acf,
308 0xa0b606a5,
309 0x00eabb01,
310 0xb60480b6,
311 0x1bf40192,
312 0x08e4b6e8,
313 0xf1f2efbc,
314 0xb6085c87,
315 0x94bd0684,
316 0xd00399f0,
317 0x00f80089,
318/* 0x02ec: error */
319 0xe7f1e0f9,
320 0xe3f09814,
321 0x8d21f440,
322 0x041ce0b7,
323 0xf401f7f0,
324 0xe0fc8d21,
325/* 0x0306: init */
326 0x04bd00f8,
327 0xf10004fe,
328 0xf0120017,
329 0x12d00227,
330 0x3e17f100,
331 0x0010fe04,
332 0x040017f1,
333 0xf0c010d0,
334 0x12d00427,
335 0x1031f400,
336 0x060817f1,
337 0xcf0614b6,
338 0x37f00012,
339 0x1f24f001,
340 0xb60432bb,
341 0x02800132,
342 0x04038003,
343 0x040010b7,
344 0x800012cf,
345 0x27f10002,
346 0x24b60800,
347 0x0022cf06,
348/* 0x035f: init_find_chipset */
349 0xb65817f0,
350 0x13980c10,
351 0x0432b800,
352 0xb00b0bf4,
353 0x1bf40034,
354/* 0x0373: init_context */
355 0xf100f8f1,
356 0xb6080027,
357 0x22cf0624,
358 0xf134bd40,
359 0xb6070047,
360 0x25950644,
361 0x0045d008,
362 0xbd4045d0,
363 0x58f4bde4,
364 0x1f58021e,
365 0x020e4003,
366 0xf5040f40,
367 0xbb013d21,
368 0x3fbb002f,
369 0x041e5800,
370 0x40051f58,
371 0x0f400a0e,
372 0x3d21f50c,
373 0x030e9801,
374 0xbb00effd,
375 0x3ebb002e,
376 0x0040b700,
377 0x0235b613,
378 0xb60043d0,
379 0x35b60825,
380 0x0120b606,
381 0xb60130b6,
382 0x34b60824,
383 0x022fb908,
384 0x026321f5,
385 0xf1003fbb,
386 0xb6080017,
387 0x13d00614,
388 0x0010b740,
389 0xf024bd08,
390 0x12d01f29,
391/* 0x0401: main */
392 0x0031f400,
393 0xf00028f4,
394 0x21f41cd7,
395 0xf401f439,
396 0xf404e4b0,
397 0x81fe1e18,
398 0x0627f001,
399 0x12fd20bd,
400 0x01e4b604,
401 0xfe051efd,
402 0x21f50018,
403 0x0ef404c3,
404/* 0x0431: main_not_ctx_xfer */
405 0x10ef94d3,
406 0xf501f5f0,
407 0xf402ec21,
408/* 0x043e: ih */
409 0x80f9c60e,
410 0xf90188fe,
411 0xf990f980,
412 0xf9b0f9a0,
413 0xf9e0f9d0,
414 0x800acff0,
415 0xf404abc4,
416 0xb7f11d0b,
417 0xd7f01900,
418 0x40becf1c,
419 0xf400bfcf,
420 0xb0b70421,
421 0xe7f00400,
422 0x00bed001,
423/* 0x0474: ih_no_fifo */
424 0xfc400ad0,
425 0xfce0fcf0,
426 0xfcb0fcd0,
427 0xfc90fca0,
428 0x0088fe80,
429 0x32f480fc,
430/* 0x048f: hub_barrier_done */
431 0xf001f800,
432 0x0e9801f7,
433 0x04febb00,
434 0x9418e7f1,
435 0xf440e3f0,
436 0x00f88d21,
437/* 0x04a4: ctx_redswitch */
438 0x0614e7f1,
439 0xf006e4b6,
440 0xefd020f7,
441 0x08f7f000,
442/* 0x04b4: ctx_redswitch_delay */
443 0xf401f2b6,
444 0xf7f1fd1b,
445 0xefd00a20,
446/* 0x04c3: ctx_xfer */
447 0xf100f800,
448 0xb60a0417,
449 0x1fd00614,
450 0x0711f400,
451 0x04a421f5,
452/* 0x04d4: ctx_xfer_not_load */
453 0x4afc17f1,
454 0xf00213f0,
455 0x12d00c27,
456 0x0721f500,
457 0xfc27f102,
458 0x0223f047,
459 0xf00020d0,
460 0x20b6012c,
461 0x0012d003,
462 0xf001acf0,
463 0xb7f002a5,
464 0x50b3f000,
465 0xb6000c98,
466 0xbcbb0fc4,
467 0x010c9800,
468 0xf0020d98,
469 0x21f500e7,
470 0xacf0015c,
471 0x04a5f001,
472 0x4000b7f1,
473 0x9850b3f0,
474 0xc4b6000c,
475 0x00bcbb0f,
476 0x98050c98,
477 0x0f98060d,
478 0x00e7f104,
479 0x5c21f508,
480 0x0721f501,
481 0x0601f402,
482/* 0x054b: ctx_xfer_post */
483 0xf11412f4,
484 0xf04afc17,
485 0x27f00213,
486 0x0012d00d,
487 0x020721f5,
488/* 0x055c: ctx_xfer_done */
489 0x048f21f5,
490 0x000000f8,
491 0x00000000,
492 0x00000000,
493 0x00000000,
494 0x00000000,
495 0x00000000,
496 0x00000000,
497 0x00000000,
498 0x00000000,
499 0x00000000,
500 0x00000000,
501 0x00000000,
502 0x00000000,
503 0x00000000,
504 0x00000000,
505 0x00000000,
506 0x00000000,
507 0x00000000,
508 0x00000000,
509 0x00000000,
510 0x00000000,
511 0x00000000,
512 0x00000000,
513 0x00000000,
514 0x00000000,
515 0x00000000,
516 0x00000000,
517 0x00000000,
518 0x00000000,
519 0x00000000,
520 0x00000000,
521 0x00000000,
522 0x00000000,
523 0x00000000,
524 0x00000000,
525 0x00000000,
526 0x00000000,
527 0x00000000,
528 0x00000000,
529 0x00000000,
530};
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
index 98acddb2c5bb..acfc457654bd 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
@@ -24,11 +24,11 @@
24 */ 24 */
25 25
26/* To build: 26/* To build:
27 * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h 27 * m4 hubnvc0.fuc | envyas -a -w -m fuc -V fuc3 -o hubnvc0.fuc.h
28 */ 28 */
29 29
30.section #nvc0_grhub_data 30.section #nvc0_grhub_data
31include(`nvc0_graph.fuc') 31include(`nvc0.fuc')
32gpc_count: .b32 0 32gpc_count: .b32 0
33rop_count: .b32 0 33rop_count: .b32 0
34cmd_queue: queue_init 34cmd_queue: queue_init
@@ -161,11 +161,11 @@ xfer_data: .b32 0
161.section #nvc0_grhub_code 161.section #nvc0_grhub_code
162bra #init 162bra #init
163define(`include_code') 163define(`include_code')
164include(`nvc0_graph.fuc') 164include(`nvc0.fuc')
165 165
166// reports an exception to the host 166// reports an exception to the host
167// 167//
168// In: $r15 error code (see nvc0_graph.fuc) 168// In: $r15 error code (see nvc0.fuc)
169// 169//
170error: 170error:
171 push $r14 171 push $r14
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index c5ed307abeb9..85a8d556f484 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -1,6 +1,9 @@
1uint32_t nvc0_grhub_data[] = { 1uint32_t nvc0_grhub_data[] = {
2/* 0x0000: gpc_count */
2 0x00000000, 3 0x00000000,
4/* 0x0004: rop_count */
3 0x00000000, 5 0x00000000,
6/* 0x0008: cmd_queue */
4 0x00000000, 7 0x00000000,
5 0x00000000, 8 0x00000000,
6 0x00000000, 9 0x00000000,
@@ -19,9 +22,13 @@ uint32_t nvc0_grhub_data[] = {
19 0x00000000, 22 0x00000000,
20 0x00000000, 23 0x00000000,
21 0x00000000, 24 0x00000000,
25/* 0x0050: hub_mmio_list_head */
22 0x00000000, 26 0x00000000,
27/* 0x0054: hub_mmio_list_tail */
23 0x00000000, 28 0x00000000,
29/* 0x0058: ctx_current */
24 0x00000000, 30 0x00000000,
31/* 0x005c: chipsets */
25 0x000000c0, 32 0x000000c0,
26 0x013c00a0, 33 0x013c00a0,
27 0x000000c1, 34 0x000000c1,
@@ -39,6 +46,7 @@ uint32_t nvc0_grhub_data[] = {
39 0x000000d9, 46 0x000000d9,
40 0x01dc0140, 47 0x01dc0140,
41 0x00000000, 48 0x00000000,
49/* 0x00a0: nvc0_hub_mmio_head */
42 0x0417e91c, 50 0x0417e91c,
43 0x04400204, 51 0x04400204,
44 0x28404004, 52 0x28404004,
@@ -78,7 +86,10 @@ uint32_t nvc0_grhub_data[] = {
78 0x08408800, 86 0x08408800,
79 0x0c408900, 87 0x0c408900,
80 0x00408980, 88 0x00408980,
89/* 0x013c: nvc0_hub_mmio_tail */
81 0x044064c0, 90 0x044064c0,
91/* 0x0140: nvc1_hub_mmio_tail */
92/* 0x0140: nvd9_hub_mmio_head */
82 0x0417e91c, 93 0x0417e91c,
83 0x04400204, 94 0x04400204,
84 0x24404004, 95 0x24404004,
@@ -118,6 +129,7 @@ uint32_t nvc0_grhub_data[] = {
118 0x08408800, 129 0x08408800,
119 0x0c408900, 130 0x0c408900,
120 0x00408980, 131 0x00408980,
132/* 0x01dc: nvd9_hub_mmio_tail */
121 0x00000000, 133 0x00000000,
122 0x00000000, 134 0x00000000,
123 0x00000000, 135 0x00000000,
@@ -127,7 +139,10 @@ uint32_t nvc0_grhub_data[] = {
127 0x00000000, 139 0x00000000,
128 0x00000000, 140 0x00000000,
129 0x00000000, 141 0x00000000,
142/* 0x0200: chan_data */
143/* 0x0200: chan_mmio_count */
130 0x00000000, 144 0x00000000,
145/* 0x0204: chan_mmio_address */
131 0x00000000, 146 0x00000000,
132 0x00000000, 147 0x00000000,
133 0x00000000, 148 0x00000000,
@@ -191,17 +206,20 @@ uint32_t nvc0_grhub_data[] = {
191 0x00000000, 206 0x00000000,
192 0x00000000, 207 0x00000000,
193 0x00000000, 208 0x00000000,
209/* 0x0300: xfer_data */
194 0x00000000, 210 0x00000000,
195}; 211};
196 212
197uint32_t nvc0_grhub_code[] = { 213uint32_t nvc0_grhub_code[] = {
198 0x03090ef5, 214 0x03090ef5,
215/* 0x0004: queue_put */
199 0x9800d898, 216 0x9800d898,
200 0x86f001d9, 217 0x86f001d9,
201 0x0489b808, 218 0x0489b808,
202 0xf00c1bf4, 219 0xf00c1bf4,
203 0x21f502f7, 220 0x21f502f7,
204 0x00f802ec, 221 0x00f802ec,
222/* 0x001c: queue_put_next */
205 0xb60798c4, 223 0xb60798c4,
206 0x8dbb0384, 224 0x8dbb0384,
207 0x0880b600, 225 0x0880b600,
@@ -209,6 +227,7 @@ uint32_t nvc0_grhub_code[] = {
209 0x90b6018f, 227 0x90b6018f,
210 0x0f94f001, 228 0x0f94f001,
211 0xf801d980, 229 0xf801d980,
230/* 0x0039: queue_get */
212 0x0131f400, 231 0x0131f400,
213 0x9800d898, 232 0x9800d898,
214 0x89b801d9, 233 0x89b801d9,
@@ -220,37 +239,46 @@ uint32_t nvc0_grhub_code[] = {
220 0x80b6019f, 239 0x80b6019f,
221 0x0f84f001, 240 0x0f84f001,
222 0xf400d880, 241 0xf400d880,
242/* 0x0066: queue_get_done */
223 0x00f80132, 243 0x00f80132,
244/* 0x0068: nv_rd32 */
224 0x0728b7f1, 245 0x0728b7f1,
225 0xb906b4b6, 246 0xb906b4b6,
226 0xc9f002ec, 247 0xc9f002ec,
227 0x00bcd01f, 248 0x00bcd01f,
249/* 0x0078: nv_rd32_wait */
228 0xc800bccf, 250 0xc800bccf,
229 0x1bf41fcc, 251 0x1bf41fcc,
230 0x06a7f0fa, 252 0x06a7f0fa,
231 0x010321f5, 253 0x010321f5,
232 0xf840bfcf, 254 0xf840bfcf,
255/* 0x008d: nv_wr32 */
233 0x28b7f100, 256 0x28b7f100,
234 0x06b4b607, 257 0x06b4b607,
235 0xb980bfd0, 258 0xb980bfd0,
236 0xc9f002ec, 259 0xc9f002ec,
237 0x1ec9f01f, 260 0x1ec9f01f,
261/* 0x00a3: nv_wr32_wait */
238 0xcf00bcd0, 262 0xcf00bcd0,
239 0xccc800bc, 263 0xccc800bc,
240 0xfa1bf41f, 264 0xfa1bf41f,
265/* 0x00ae: watchdog_reset */
241 0x87f100f8, 266 0x87f100f8,
242 0x84b60430, 267 0x84b60430,
243 0x1ff9f006, 268 0x1ff9f006,
244 0xf8008fd0, 269 0xf8008fd0,
270/* 0x00bd: watchdog_clear */
245 0x3087f100, 271 0x3087f100,
246 0x0684b604, 272 0x0684b604,
247 0xf80080d0, 273 0xf80080d0,
274/* 0x00c9: wait_donez */
248 0x3c87f100, 275 0x3c87f100,
249 0x0684b608, 276 0x0684b608,
250 0x99f094bd, 277 0x99f094bd,
251 0x0089d000, 278 0x0089d000,
252 0x081887f1, 279 0x081887f1,
253 0xd00684b6, 280 0xd00684b6,
281/* 0x00e2: wait_done_wait_donez */
254 0x87f1008a, 282 0x87f1008a,
255 0x84b60400, 283 0x84b60400,
256 0x0088cf06, 284 0x0088cf06,
@@ -259,6 +287,7 @@ uint32_t nvc0_grhub_code[] = {
259 0x84b6085c, 287 0x84b6085c,
260 0xf094bd06, 288 0xf094bd06,
261 0x89d00099, 289 0x89d00099,
290/* 0x0103: wait_doneo */
262 0xf100f800, 291 0xf100f800,
263 0xb6083c87, 292 0xb6083c87,
264 0x94bd0684, 293 0x94bd0684,
@@ -266,6 +295,7 @@ uint32_t nvc0_grhub_code[] = {
266 0x87f10089, 295 0x87f10089,
267 0x84b60818, 296 0x84b60818,
268 0x008ad006, 297 0x008ad006,
298/* 0x011c: wait_done_wait_doneo */
269 0x040087f1, 299 0x040087f1,
270 0xcf0684b6, 300 0xcf0684b6,
271 0x8aff0088, 301 0x8aff0088,
@@ -274,6 +304,8 @@ uint32_t nvc0_grhub_code[] = {
274 0xbd0684b6, 304 0xbd0684b6,
275 0x0099f094, 305 0x0099f094,
276 0xf80089d0, 306 0xf80089d0,
307/* 0x013d: mmctx_size */
308/* 0x013f: nv_mmctx_size_loop */
277 0x9894bd00, 309 0x9894bd00,
278 0x85b600e8, 310 0x85b600e8,
279 0x0180b61a, 311 0x0180b61a,
@@ -282,6 +314,7 @@ uint32_t nvc0_grhub_code[] = {
282 0x04efb804, 314 0x04efb804,
283 0xb9eb1bf4, 315 0xb9eb1bf4,
284 0x00f8029f, 316 0x00f8029f,
317/* 0x015c: mmctx_xfer */
285 0x083c87f1, 318 0x083c87f1,
286 0xbd0684b6, 319 0xbd0684b6,
287 0x0199f094, 320 0x0199f094,
@@ -291,9 +324,11 @@ uint32_t nvc0_grhub_code[] = {
291 0xf405bbfd, 324 0xf405bbfd,
292 0x8bd0090b, 325 0x8bd0090b,
293 0x0099f000, 326 0x0099f000,
327/* 0x0180: mmctx_base_disabled */
294 0xf405eefd, 328 0xf405eefd,
295 0x8ed00c0b, 329 0x8ed00c0b,
296 0xc08fd080, 330 0xc08fd080,
331/* 0x018f: mmctx_multi_disabled */
297 0xb70199f0, 332 0xb70199f0,
298 0xc8010080, 333 0xc8010080,
299 0xb4b600ab, 334 0xb4b600ab,
@@ -301,6 +336,8 @@ uint32_t nvc0_grhub_code[] = {
301 0xb601aec8, 336 0xb601aec8,
302 0xbefd11e4, 337 0xbefd11e4,
303 0x008bd005, 338 0x008bd005,
339/* 0x01a8: mmctx_exec_loop */
340/* 0x01a8: mmctx_wait_free */
304 0xf0008ecf, 341 0xf0008ecf,
305 0x0bf41fe4, 342 0x0bf41fe4,
306 0x00ce98fa, 343 0x00ce98fa,
@@ -309,34 +346,42 @@ uint32_t nvc0_grhub_code[] = {
309 0x04cdb804, 346 0x04cdb804,
310 0xc8e81bf4, 347 0xc8e81bf4,
311 0x1bf402ab, 348 0x1bf402ab,
349/* 0x01c9: mmctx_fini_wait */
312 0x008bcf18, 350 0x008bcf18,
313 0xb01fb4f0, 351 0xb01fb4f0,
314 0x1bf410b4, 352 0x1bf410b4,
315 0x02a7f0f7, 353 0x02a7f0f7,
316 0xf4c921f4, 354 0xf4c921f4,
355/* 0x01de: mmctx_stop */
317 0xabc81b0e, 356 0xabc81b0e,
318 0x10b4b600, 357 0x10b4b600,
319 0xf00cb9f0, 358 0xf00cb9f0,
320 0x8bd012b9, 359 0x8bd012b9,
360/* 0x01ed: mmctx_stop_wait */
321 0x008bcf00, 361 0x008bcf00,
322 0xf412bbc8, 362 0xf412bbc8,
363/* 0x01f6: mmctx_done */
323 0x87f1fa1b, 364 0x87f1fa1b,
324 0x84b6085c, 365 0x84b6085c,
325 0xf094bd06, 366 0xf094bd06,
326 0x89d00199, 367 0x89d00199,
368/* 0x0207: strand_wait */
327 0xf900f800, 369 0xf900f800,
328 0x02a7f0a0, 370 0x02a7f0a0,
329 0xfcc921f4, 371 0xfcc921f4,
372/* 0x0213: strand_pre */
330 0xf100f8a0, 373 0xf100f8a0,
331 0xf04afc87, 374 0xf04afc87,
332 0x97f00283, 375 0x97f00283,
333 0x0089d00c, 376 0x0089d00c,
334 0x020721f5, 377 0x020721f5,
378/* 0x0226: strand_post */
335 0x87f100f8, 379 0x87f100f8,
336 0x83f04afc, 380 0x83f04afc,
337 0x0d97f002, 381 0x0d97f002,
338 0xf50089d0, 382 0xf50089d0,
339 0xf8020721, 383 0xf8020721,
384/* 0x0239: strand_set */
340 0xfca7f100, 385 0xfca7f100,
341 0x02a3f04f, 386 0x02a3f04f,
342 0x0500aba2, 387 0x0500aba2,
@@ -347,6 +392,7 @@ uint32_t nvc0_grhub_code[] = {
347 0xf000aed0, 392 0xf000aed0,
348 0xbcd00ac7, 393 0xbcd00ac7,
349 0x0721f500, 394 0x0721f500,
395/* 0x0263: strand_ctx_init */
350 0xf100f802, 396 0xf100f802,
351 0xb6083c87, 397 0xb6083c87,
352 0x94bd0684, 398 0x94bd0684,
@@ -369,6 +415,7 @@ uint32_t nvc0_grhub_code[] = {
369 0x0684b608, 415 0x0684b608,
370 0xb70089cf, 416 0xb70089cf,
371 0x95220080, 417 0x95220080,
418/* 0x02ba: ctx_init_strand_loop */
372 0x8ed008fe, 419 0x8ed008fe,
373 0x408ed000, 420 0x408ed000,
374 0xb6808acf, 421 0xb6808acf,
@@ -382,6 +429,7 @@ uint32_t nvc0_grhub_code[] = {
382 0x94bd0684, 429 0x94bd0684,
383 0xd00399f0, 430 0xd00399f0,
384 0x00f80089, 431 0x00f80089,
432/* 0x02ec: error */
385 0xe7f1e0f9, 433 0xe7f1e0f9,
386 0xe4b60814, 434 0xe4b60814,
387 0x00efd006, 435 0x00efd006,
@@ -389,6 +437,7 @@ uint32_t nvc0_grhub_code[] = {
389 0xf006e4b6, 437 0xf006e4b6,
390 0xefd001f7, 438 0xefd001f7,
391 0xf8e0fc00, 439 0xf8e0fc00,
440/* 0x0309: init */
392 0xfe04bd00, 441 0xfe04bd00,
393 0x07fe0004, 442 0x07fe0004,
394 0x0017f100, 443 0x0017f100,
@@ -429,11 +478,13 @@ uint32_t nvc0_grhub_code[] = {
429 0x080027f1, 478 0x080027f1,
430 0xcf0624b6, 479 0xcf0624b6,
431 0xf7f00022, 480 0xf7f00022,
481/* 0x03a9: init_find_chipset */
432 0x08f0b654, 482 0x08f0b654,
433 0xb800f398, 483 0xb800f398,
434 0x0bf40432, 484 0x0bf40432,
435 0x0034b00b, 485 0x0034b00b,
436 0xf8f11bf4, 486 0xf8f11bf4,
487/* 0x03bd: init_context */
437 0x0017f100, 488 0x0017f100,
438 0x02fe5801, 489 0x02fe5801,
439 0xf003ff58, 490 0xf003ff58,
@@ -454,6 +505,7 @@ uint32_t nvc0_grhub_code[] = {
454 0x001fbb02, 505 0x001fbb02,
455 0xf1000398, 506 0xf1000398,
456 0xf0200047, 507 0xf0200047,
508/* 0x040e: init_gpc */
457 0x4ea05043, 509 0x4ea05043,
458 0x1fb90804, 510 0x1fb90804,
459 0x8d21f402, 511 0x8d21f402,
@@ -467,6 +519,7 @@ uint32_t nvc0_grhub_code[] = {
467 0xf7f00100, 519 0xf7f00100,
468 0x8d21f402, 520 0x8d21f402,
469 0x08004ea0, 521 0x08004ea0,
522/* 0x0440: init_gpc_wait */
470 0xc86821f4, 523 0xc86821f4,
471 0x0bf41fff, 524 0x0bf41fff,
472 0x044ea0fa, 525 0x044ea0fa,
@@ -479,6 +532,7 @@ uint32_t nvc0_grhub_code[] = {
479 0xb74021d0, 532 0xb74021d0,
480 0xbd080020, 533 0xbd080020,
481 0x1f19f014, 534 0x1f19f014,
535/* 0x0473: main */
482 0xf40021d0, 536 0xf40021d0,
483 0x28f40031, 537 0x28f40031,
484 0x08d7f000, 538 0x08d7f000,
@@ -517,6 +571,7 @@ uint32_t nvc0_grhub_code[] = {
517 0x94bd0684, 571 0x94bd0684,
518 0xd00699f0, 572 0xd00699f0,
519 0x0ef40089, 573 0x0ef40089,
574/* 0x0509: chsw_prev_no_next */
520 0xb920f931, 575 0xb920f931,
521 0x32f40212, 576 0x32f40212,
522 0x0232f401, 577 0x0232f401,
@@ -524,10 +579,12 @@ uint32_t nvc0_grhub_code[] = {
524 0x17f120fc, 579 0x17f120fc,
525 0x14b60b00, 580 0x14b60b00,
526 0x0012d006, 581 0x0012d006,
582/* 0x0527: chsw_no_prev */
527 0xc8130ef4, 583 0xc8130ef4,
528 0x0bf41f23, 584 0x0bf41f23,
529 0x0131f40d, 585 0x0131f40d,
530 0xf50232f4, 586 0xf50232f4,
587/* 0x0537: chsw_done */
531 0xf1082921, 588 0xf1082921,
532 0xb60b0c17, 589 0xb60b0c17,
533 0x27f00614, 590 0x27f00614,
@@ -536,10 +593,12 @@ uint32_t nvc0_grhub_code[] = {
536 0xbd0684b6, 593 0xbd0684b6,
537 0x0499f094, 594 0x0499f094,
538 0xf50089d0, 595 0xf50089d0,
596/* 0x0557: main_not_ctx_switch */
539 0xb0ff200e, 597 0xb0ff200e,
540 0x1bf401e4, 598 0x1bf401e4,
541 0x02f2b90d, 599 0x02f2b90d,
542 0x07b521f5, 600 0x07b521f5,
601/* 0x0567: main_not_ctx_chan */
543 0xb0420ef4, 602 0xb0420ef4,
544 0x1bf402e4, 603 0x1bf402e4,
545 0x3c87f12e, 604 0x3c87f12e,
@@ -553,14 +612,17 @@ uint32_t nvc0_grhub_code[] = {
553 0xf094bd06, 612 0xf094bd06,
554 0x89d00799, 613 0x89d00799,
555 0x110ef400, 614 0x110ef400,
615/* 0x0598: main_not_ctx_save */
556 0xf010ef94, 616 0xf010ef94,
557 0x21f501f5, 617 0x21f501f5,
558 0x0ef502ec, 618 0x0ef502ec,
619/* 0x05a6: main_done */
559 0x17f1fed1, 620 0x17f1fed1,
560 0x14b60820, 621 0x14b60820,
561 0xf024bd06, 622 0xf024bd06,
562 0x12d01f29, 623 0x12d01f29,
563 0xbe0ef500, 624 0xbe0ef500,
625/* 0x05b9: ih */
564 0xfe80f9fe, 626 0xfe80f9fe,
565 0x80f90188, 627 0x80f90188,
566 0xa0f990f9, 628 0xa0f990f9,
@@ -574,16 +636,19 @@ uint32_t nvc0_grhub_code[] = {
574 0x21f400bf, 636 0x21f400bf,
575 0x00b0b704, 637 0x00b0b704,
576 0x01e7f004, 638 0x01e7f004,
639/* 0x05ef: ih_no_fifo */
577 0xe400bed0, 640 0xe400bed0,
578 0xf40100ab, 641 0xf40100ab,
579 0xd7f00d0b, 642 0xd7f00d0b,
580 0x01e7f108, 643 0x01e7f108,
581 0x0421f440, 644 0x0421f440,
645/* 0x0600: ih_no_ctxsw */
582 0x0104b7f1, 646 0x0104b7f1,
583 0xabffb0bd, 647 0xabffb0bd,
584 0x0d0bf4b4, 648 0x0d0bf4b4,
585 0x0c1ca7f1, 649 0x0c1ca7f1,
586 0xd006a4b6, 650 0xd006a4b6,
651/* 0x0616: ih_no_other */
587 0x0ad000ab, 652 0x0ad000ab,
588 0xfcf0fc40, 653 0xfcf0fc40,
589 0xfcd0fce0, 654 0xfcd0fce0,
@@ -591,32 +656,40 @@ uint32_t nvc0_grhub_code[] = {
591 0xfe80fc90, 656 0xfe80fc90,
592 0x80fc0088, 657 0x80fc0088,
593 0xf80032f4, 658 0xf80032f4,
659/* 0x0631: ctx_4160s */
594 0x60e7f101, 660 0x60e7f101,
595 0x40e3f041, 661 0x40e3f041,
596 0xf401f7f0, 662 0xf401f7f0,
663/* 0x063e: ctx_4160s_wait */
597 0x21f48d21, 664 0x21f48d21,
598 0x04ffc868, 665 0x04ffc868,
599 0xf8fa0bf4, 666 0xf8fa0bf4,
667/* 0x0649: ctx_4160c */
600 0x60e7f100, 668 0x60e7f100,
601 0x40e3f041, 669 0x40e3f041,
602 0x21f4f4bd, 670 0x21f4f4bd,
671/* 0x0657: ctx_4170s */
603 0xf100f88d, 672 0xf100f88d,
604 0xf04170e7, 673 0xf04170e7,
605 0xf5f040e3, 674 0xf5f040e3,
606 0x8d21f410, 675 0x8d21f410,
676/* 0x0666: ctx_4170w */
607 0xe7f100f8, 677 0xe7f100f8,
608 0xe3f04170, 678 0xe3f04170,
609 0x6821f440, 679 0x6821f440,
610 0xf410f4f0, 680 0xf410f4f0,
611 0x00f8f31b, 681 0x00f8f31b,
682/* 0x0678: ctx_redswitch */
612 0x0614e7f1, 683 0x0614e7f1,
613 0xf106e4b6, 684 0xf106e4b6,
614 0xd00270f7, 685 0xd00270f7,
615 0xf7f000ef, 686 0xf7f000ef,
687/* 0x0689: ctx_redswitch_delay */
616 0x01f2b608, 688 0x01f2b608,
617 0xf1fd1bf4, 689 0xf1fd1bf4,
618 0xd00770f7, 690 0xd00770f7,
619 0x00f800ef, 691 0x00f800ef,
692/* 0x0698: ctx_86c */
620 0x086ce7f1, 693 0x086ce7f1,
621 0xd006e4b6, 694 0xd006e4b6,
622 0xe7f100ef, 695 0xe7f100ef,
@@ -625,6 +698,7 @@ uint32_t nvc0_grhub_code[] = {
625 0xa86ce7f1, 698 0xa86ce7f1,
626 0xf441e3f0, 699 0xf441e3f0,
627 0x00f88d21, 700 0x00f88d21,
701/* 0x06b8: ctx_load */
628 0x083c87f1, 702 0x083c87f1,
629 0xbd0684b6, 703 0xbd0684b6,
630 0x0599f094, 704 0x0599f094,
@@ -639,6 +713,7 @@ uint32_t nvc0_grhub_code[] = {
639 0x0614b60a, 713 0x0614b60a,
640 0xd00747f0, 714 0xd00747f0,
641 0x14d00012, 715 0x14d00012,
716/* 0x06f1: ctx_chan_wait_0 */
642 0x4014cf40, 717 0x4014cf40,
643 0xf41f44f0, 718 0xf41f44f0,
644 0x32d0fa1b, 719 0x32d0fa1b,
@@ -688,6 +763,7 @@ uint32_t nvc0_grhub_code[] = {
688 0xbd0684b6, 763 0xbd0684b6,
689 0x0599f094, 764 0x0599f094,
690 0xf80089d0, 765 0xf80089d0,
766/* 0x07b5: ctx_chan */
691 0x3121f500, 767 0x3121f500,
692 0xb821f506, 768 0xb821f506,
693 0x0ca7f006, 769 0x0ca7f006,
@@ -695,39 +771,48 @@ uint32_t nvc0_grhub_code[] = {
695 0xb60a1017, 771 0xb60a1017,
696 0x27f00614, 772 0x27f00614,
697 0x0012d005, 773 0x0012d005,
774/* 0x07d0: ctx_chan_wait */
698 0xfd0012cf, 775 0xfd0012cf,
699 0x1bf40522, 776 0x1bf40522,
700 0x4921f5fa, 777 0x4921f5fa,
778/* 0x07df: ctx_mmio_exec */
701 0x9800f806, 779 0x9800f806,
702 0x27f18103, 780 0x27f18103,
703 0x24b60a04, 781 0x24b60a04,
704 0x0023d006, 782 0x0023d006,
783/* 0x07ee: ctx_mmio_loop */
705 0x34c434bd, 784 0x34c434bd,
706 0x0f1bf4ff, 785 0x0f1bf4ff,
707 0x030057f1, 786 0x030057f1,
708 0xfa0653f0, 787 0xfa0653f0,
709 0x03f80535, 788 0x03f80535,
789/* 0x0800: ctx_mmio_pull */
710 0x98c04e98, 790 0x98c04e98,
711 0x21f4c14f, 791 0x21f4c14f,
712 0x0830b68d, 792 0x0830b68d,
713 0xf40112b6, 793 0xf40112b6,
794/* 0x0812: ctx_mmio_done */
714 0x0398df1b, 795 0x0398df1b,
715 0x0023d016, 796 0x0023d016,
716 0xf1800080, 797 0xf1800080,
717 0xf0020017, 798 0xf0020017,
718 0x01fa0613, 799 0x01fa0613,
719 0xf803f806, 800 0xf803f806,
801/* 0x0829: ctx_xfer */
720 0x0611f400, 802 0x0611f400,
803/* 0x082f: ctx_xfer_pre */
721 0xf01102f4, 804 0xf01102f4,
722 0x21f510f7, 805 0x21f510f7,
723 0x21f50698, 806 0x21f50698,
724 0x11f40631, 807 0x11f40631,
808/* 0x083d: ctx_xfer_pre_load */
725 0x02f7f01c, 809 0x02f7f01c,
726 0x065721f5, 810 0x065721f5,
727 0x066621f5, 811 0x066621f5,
728 0x067821f5, 812 0x067821f5,
729 0x21f5f4bd, 813 0x21f5f4bd,
730 0x21f50657, 814 0x21f50657,
815/* 0x0856: ctx_xfer_exec */
731 0x019806b8, 816 0x019806b8,
732 0x1427f116, 817 0x1427f116,
733 0x0624b604, 818 0x0624b604,
@@ -762,9 +847,11 @@ uint32_t nvc0_grhub_code[] = {
762 0x0a1017f1, 847 0x0a1017f1,
763 0xf00614b6, 848 0xf00614b6,
764 0x12d00527, 849 0x12d00527,
850/* 0x08dd: ctx_xfer_post_save_wait */
765 0x0012cf00, 851 0x0012cf00,
766 0xf40522fd, 852 0xf40522fd,
767 0x02f4fa1b, 853 0x02f4fa1b,
854/* 0x08e9: ctx_xfer_post */
768 0x02f7f032, 855 0x02f7f032,
769 0x065721f5, 856 0x065721f5,
770 0x21f5f4bd, 857 0x21f5f4bd,
@@ -776,7 +863,9 @@ uint32_t nvc0_grhub_code[] = {
776 0x11fd8001, 863 0x11fd8001,
777 0x070bf405, 864 0x070bf405,
778 0x07df21f5, 865 0x07df21f5,
866/* 0x0914: ctx_xfer_no_post_mmio */
779 0x064921f5, 867 0x064921f5,
868/* 0x0918: ctx_xfer_done */
780 0x000000f8, 869 0x000000f8,
781 0x00000000, 870 0x00000000,
782 0x00000000, 871 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
new file mode 100644
index 000000000000..138eeaa28665
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
@@ -0,0 +1,780 @@
1/* fuc microcode for nve0 PGRAPH/HUB
2 *
3 * Copyright 2011 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Ben Skeggs
24 */
25
26/* To build:
27 * m4 hubnve0.fuc | envyas -a -w -m fuc -V nva3 -o hubnve0.fuc.h
28 */
29
30.section #nve0_grhub_data
31include(`nve0.fuc')
32gpc_count: .b32 0
33rop_count: .b32 0
34cmd_queue: queue_init
35hub_mmio_list_head: .b32 0
36hub_mmio_list_tail: .b32 0
37
38ctx_current: .b32 0
39
40chipsets:
41.b8 0xe4 0 0 0
42.b16 #nve4_hub_mmio_head
43.b16 #nve4_hub_mmio_tail
44.b8 0xe7 0 0 0
45.b16 #nve4_hub_mmio_head
46.b16 #nve4_hub_mmio_tail
47.b8 0 0 0 0
48
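
The #chipsets table above packs one eight-byte record per supported chipset: a 32-bit chipset id (`.b8 0xe4 0 0 0`), then the 16-bit offsets of that chipset's hub mmio list head and tail, with a zero id terminating the table; init_find_chipset walks it in 8-byte strides. A rough C sketch of the lookup, assuming little-endian packing as seen in the generated header (where { 0x000000e4, 0x013c0070 } decodes to chipset 0xe4, head 0x0070, tail 0x013c):

#include <stddef.h>
#include <stdint.h>

struct hub_chipset {                 /* one 8-byte #chipsets record */
	uint32_t chipset;            /* .b8 0xe4 0 0 0               */
	uint16_t mmio_head;          /* .b16 #nve4_hub_mmio_head     */
	uint16_t mmio_tail;          /* .b16 #nve4_hub_mmio_tail     */
};

static const struct hub_chipset *
find_chipset(const struct hub_chipset *tbl, uint32_t chipset)
{
	for (; tbl->chipset; tbl++) {        /* id 0 ends the table */
		if (tbl->chipset == chipset)
			return tbl;
	}
	return NULL;                         /* unknown chipset */
}
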
49nve4_hub_mmio_head:
50mmctx_data(0x17e91c, 2)
51mmctx_data(0x400204, 2)
52mmctx_data(0x404010, 7)
53mmctx_data(0x4040a8, 9)
54mmctx_data(0x4040d0, 7)
55mmctx_data(0x4040f8, 1)
56mmctx_data(0x404130, 3)
57mmctx_data(0x404150, 3)
58mmctx_data(0x404164, 1)
59mmctx_data(0x4041a0, 4)
60mmctx_data(0x404200, 4)
61mmctx_data(0x404404, 14)
62mmctx_data(0x404460, 4)
63mmctx_data(0x404480, 1)
64mmctx_data(0x404498, 1)
65mmctx_data(0x404604, 4)
66mmctx_data(0x404618, 4)
67mmctx_data(0x40462c, 2)
68mmctx_data(0x404640, 1)
69mmctx_data(0x404654, 1)
70mmctx_data(0x404660, 1)
71mmctx_data(0x404678, 19)
72mmctx_data(0x4046c8, 3)
73mmctx_data(0x404700, 3)
74mmctx_data(0x404718, 10)
75mmctx_data(0x404744, 2)
76mmctx_data(0x404754, 1)
77mmctx_data(0x405800, 1)
78mmctx_data(0x405830, 3)
79mmctx_data(0x405854, 1)
80mmctx_data(0x405870, 4)
81mmctx_data(0x405a00, 2)
82mmctx_data(0x405a18, 1)
83mmctx_data(0x405b00, 1)
84mmctx_data(0x405b10, 1)
85mmctx_data(0x406020, 1)
86mmctx_data(0x406028, 4)
87mmctx_data(0x4064a8, 2)
88mmctx_data(0x4064b4, 2)
89mmctx_data(0x4064c0, 12)
90mmctx_data(0x4064fc, 1)
91mmctx_data(0x407040, 1)
92mmctx_data(0x407804, 1)
93mmctx_data(0x40780c, 6)
94mmctx_data(0x4078bc, 1)
95mmctx_data(0x408000, 7)
96mmctx_data(0x408064, 1)
97mmctx_data(0x408800, 3)
98mmctx_data(0x408840, 1)
99mmctx_data(0x408900, 3)
100mmctx_data(0x408980, 1)
101nve4_hub_mmio_tail:
102
103.align 256
104chan_data:
105chan_mmio_count: .b32 0
106chan_mmio_address: .b32 0
107
108.align 256
109xfer_data: .b32 0
110
111.section #nve0_grhub_code
112bra #init
113define(`include_code')
114include(`nve0.fuc')
115
116// reports an exception to the host
117//
118// In: $r15 error code (see nve0.fuc)
119//
120error:
121 push $r14
122 mov $r14 0x814
123 shl b32 $r14 6
124 iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code
125 mov $r14 0xc1c
126 shl b32 $r14 6
127 mov $r15 1
128 iowr I[$r14 + 0x000] $r15 // INTR_UP_SET
129 pop $r14
130 ret
131
132// HUB fuc initialisation, executed by triggering ucode start, will
133// fall through to the main loop after completion.
134//
135// Input:
136// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
137//
138// Output:
139// CC_SCRATCH[0]:
140// 31:31: set to signal completion
141// CC_SCRATCH[1]:
142// 31:0: total PGRAPH context size
143//
144init:
145 clear b32 $r0
146 mov $sp $r0
147 mov $xdbase $r0
148
149 // enable fifo access
150 mov $r1 0x1200
151 mov $r2 2
152 iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
153
154 // setup i0 handler, and route all interrupts to it
155 mov $r1 #ih
156 mov $iv0 $r1
157 mov $r1 0x400
158 iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
159
160 // route HUB_CHANNEL_SWITCH to fuc interrupt 8
161 mov $r3 0x404
162 shl b32 $r3 6
163 mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
164 iowr I[$r3 + 0x000] $r2
165
	166	// not sure what these are; route them because NVIDIA does, and
	167	// the IRQ handler will signal the host if we ever get one. We
	168	// may find out if/why we need to handle these if so.
169 //
170 mov $r2 0x2004
171 iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
172 mov $r2 0x200b
173 iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
174 mov $r2 0x200c
175 iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
176
177 // enable all INTR_UP interrupts
178 mov $r2 0xc24
179 shl b32 $r2 6
180 not b32 $r3 $r0
181 iowr I[$r2] $r3
182
183 // enable fifo, ctxsw, 9, 10, 15 interrupts
184 mov $r2 -0x78fc // 0x8704
185 sethi $r2 0
186 iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
187
188 // fifo level triggered, rest edge
189 sub b32 $r1 0x100
190 mov $r2 4
191 iowr I[$r1] $r2
192
193 // enable interrupts
194 bset $flags ie0
195
196 // fetch enabled GPC/ROP counts
197 mov $r14 -0x69fc // 0x409604
198 sethi $r14 0x400000
199 call #nv_rd32
200 extr $r1 $r15 16:20
201 st b32 D[$r0 + #rop_count] $r1
202 and $r15 0x1f
203 st b32 D[$r0 + #gpc_count] $r15
204
205 // set BAR_REQMASK to GPC mask
206 mov $r1 1
207 shl b32 $r1 $r15
208 sub b32 $r1 1
209 mov $r2 0x40c
210 shl b32 $r2 6
211 iowr I[$r2 + 0x000] $r1
212 iowr I[$r2 + 0x100] $r1
213
214 // find context data for this chipset
215 mov $r2 0x800
216 shl b32 $r2 6
217 iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
218 mov $r15 #chipsets - 8
219 init_find_chipset:
220 add b32 $r15 8
221 ld b32 $r3 D[$r15 + 0x00]
222 cmpu b32 $r3 $r2
223 bra e #init_context
224 cmpu b32 $r3 0
225 bra ne #init_find_chipset
226 // unknown chipset
227 ret
228
229 // context size calculation, reserve first 256 bytes for use by fuc
230 init_context:
231 mov $r1 256
232
233 // calculate size of mmio context data
234 ld b16 $r14 D[$r15 + 4]
235 ld b16 $r15 D[$r15 + 6]
236 sethi $r14 0
237 st b32 D[$r0 + #hub_mmio_list_head] $r14
238 st b32 D[$r0 + #hub_mmio_list_tail] $r15
239 call #mmctx_size
240
	241	// set mmctx base addresses now so we don't have to do it later;
	242	// they don't (currently) ever change
243 mov $r3 0x700
244 shl b32 $r3 6
245 shr b32 $r4 $r1 8
246 iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE
247 iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE
248 add b32 $r3 0x1300
249 add b32 $r1 $r15
250 shr b32 $r15 2
251 iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!?
252
253 // strands, base offset needs to be aligned to 256 bytes
254 shr b32 $r1 8
255 add b32 $r1 1
256 shl b32 $r1 8
257 mov b32 $r15 $r1
258 call #strand_ctx_init
259 add b32 $r1 $r15
260
261 // initialise each GPC in sequence by passing in the offset of its
262 // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
263 // has previously been uploaded by the host) running.
264 //
265 // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
266 // when it has completed, and return the size of its context data
267 // in GPCn_CC_SCRATCH[1]
268 //
269 ld b32 $r3 D[$r0 + #gpc_count]
270 mov $r4 0x2000
271 sethi $r4 0x500000
272 init_gpc:
273 // setup, and start GPC ucode running
274 add b32 $r14 $r4 0x804
275 mov b32 $r15 $r1
276 call #nv_wr32 // CC_SCRATCH[1] = ctx offset
277 add b32 $r14 $r4 0x800
278 mov b32 $r15 $r2
279 call #nv_wr32 // CC_SCRATCH[0] = chipset
280 add b32 $r14 $r4 0x10c
281 clear b32 $r15
282 call #nv_wr32
283 add b32 $r14 $r4 0x104
284 call #nv_wr32 // ENTRY
285 add b32 $r14 $r4 0x100
286 mov $r15 2 // CTRL_START_TRIGGER
287 call #nv_wr32 // CTRL
288
289 // wait for it to complete, and adjust context size
290 add b32 $r14 $r4 0x800
291 init_gpc_wait:
292 call #nv_rd32
293 xbit $r15 $r15 31
294 bra e #init_gpc_wait
295 add b32 $r14 $r4 0x804
296 call #nv_rd32
297 add b32 $r1 $r15
298
299 // next!
300 add b32 $r4 0x8000
301 sub b32 $r3 1
302 bra ne #init_gpc
303
304 // save context size, and tell host we're ready
305 mov $r2 0x800
306 shl b32 $r2 6
307 iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size
308 add b32 $r2 0x800
309 clear b32 $r1
310 bset $r1 31
311 iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000
312
313// Main program loop: very simple, sleeps until woken up by the interrupt
314// handler, pulls a command from the queue and executes its handler
315//
316main:
317 // sleep until we have something to do
318 bset $flags $p0
319 sleep $p0
320 mov $r13 #cmd_queue
321 call #queue_get
322 bra $p1 #main
323
324 // context switch, requested by GPU?
325 cmpu b32 $r14 0x4001
326 bra ne #main_not_ctx_switch
327 trace_set(T_AUTO)
328 mov $r1 0xb00
329 shl b32 $r1 6
330 iord $r2 I[$r1 + 0x100] // CHAN_NEXT
331 iord $r1 I[$r1 + 0x000] // CHAN_CUR
332
333 xbit $r3 $r1 31
334 bra e #chsw_no_prev
335 xbit $r3 $r2 31
336 bra e #chsw_prev_no_next
337 push $r2
338 mov b32 $r2 $r1
339 trace_set(T_SAVE)
340 bclr $flags $p1
341 bset $flags $p2
342 call #ctx_xfer
343 trace_clr(T_SAVE);
344 pop $r2
345 trace_set(T_LOAD);
346 bset $flags $p1
347 call #ctx_xfer
348 trace_clr(T_LOAD);
349 bra #chsw_done
350 chsw_prev_no_next:
351 push $r2
352 mov b32 $r2 $r1
353 bclr $flags $p1
354 bclr $flags $p2
355 call #ctx_xfer
356 pop $r2
357 mov $r1 0xb00
358 shl b32 $r1 6
359 iowr I[$r1] $r2
360 bra #chsw_done
361 chsw_no_prev:
362 xbit $r3 $r2 31
363 bra e #chsw_done
364 bset $flags $p1
365 bclr $flags $p2
366 call #ctx_xfer
367
368 // ack the context switch request
369 chsw_done:
370 mov $r1 0xb0c
371 shl b32 $r1 6
372 mov $r2 1
373 iowr I[$r1 + 0x000] $r2 // 0x409b0c
374 trace_clr(T_AUTO)
375 bra #main
376
377 // request to set current channel? (*not* a context switch)
378 main_not_ctx_switch:
379 cmpu b32 $r14 0x0001
380 bra ne #main_not_ctx_chan
381 mov b32 $r2 $r15
382 call #ctx_chan
383 bra #main_done
384
385 // request to store current channel context?
386 main_not_ctx_chan:
387 cmpu b32 $r14 0x0002
388 bra ne #main_not_ctx_save
389 trace_set(T_SAVE)
390 bclr $flags $p1
391 bclr $flags $p2
392 call #ctx_xfer
393 trace_clr(T_SAVE)
394 bra #main_done
395
396 main_not_ctx_save:
397 shl b32 $r15 $r14 16
398 or $r15 E_BAD_COMMAND
399 call #error
400 bra #main
401
402 main_done:
403 mov $r1 0x820
404 shl b32 $r1 6
405 clear b32 $r2
406 bset $r2 31
407 iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
408 bra #main
409
410// interrupt handler
411ih:
412 push $r8
413 mov $r8 $flags
414 push $r8
415 push $r9
416 push $r10
417 push $r11
418 push $r13
419 push $r14
420 push $r15
421
422 // incoming fifo command?
423 iord $r10 I[$r0 + 0x200] // INTR
424 and $r11 $r10 0x00000004
425 bra e #ih_no_fifo
426 // queue incoming fifo command for later processing
427 mov $r11 0x1900
428 mov $r13 #cmd_queue
429 iord $r14 I[$r11 + 0x100] // FIFO_CMD
430 iord $r15 I[$r11 + 0x000] // FIFO_DATA
431 call #queue_put
432 add b32 $r11 0x400
433 mov $r14 1
434 iowr I[$r11 + 0x000] $r14 // FIFO_ACK
435
436 // context switch request?
437 ih_no_fifo:
438 and $r11 $r10 0x00000100
439 bra e #ih_no_ctxsw
440 // enqueue a context switch for later processing
441 mov $r13 #cmd_queue
442 mov $r14 0x4001
443 call #queue_put
444
445 // anything we didn't handle, bring it to the host's attention
446 ih_no_ctxsw:
447 mov $r11 0x104
448 not b32 $r11
449 and $r11 $r10 $r11
450 bra e #ih_no_other
451 mov $r10 0xc1c
452 shl b32 $r10 6
453 iowr I[$r10] $r11 // INTR_UP_SET
454
455 // ack, and wake up main()
456 ih_no_other:
457 iowr I[$r0 + 0x100] $r10 // INTR_ACK
458
459 pop $r15
460 pop $r14
461 pop $r13
462 pop $r11
463 pop $r10
464 pop $r9
465 pop $r8
466 mov $flags $r8
467 pop $r8
468 bclr $flags $p0
469 iret
470
471// Again, not really sure
472//
473// In: $r15 value to set 0x404170 to
474//
475ctx_4170s:
476 mov $r14 0x4170
477 sethi $r14 0x400000
478 or $r15 0x10
479 call #nv_wr32
480 ret
481
482// Waits for a ctx_4170s() call to complete
483//
484ctx_4170w:
485 mov $r14 0x4170
486 sethi $r14 0x400000
487 call #nv_rd32
488 and $r15 0x10
489 bra ne #ctx_4170w
490 ret
491
492// Disables various things, waits a bit, and re-enables them..
493//
494// Not sure how exactly this helps, perhaps "ENABLE" is not such a
495// good description for the bits we turn off? Anyway, without this,
496// funny things happen.
497//
498ctx_redswitch:
499 mov $r14 0x614
500 shl b32 $r14 6
501 mov $r15 0x270
502 iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
503 mov $r15 8
504 ctx_redswitch_delay:
505 sub b32 $r15 1
506 bra ne #ctx_redswitch_delay
507 mov $r15 0x770
508 iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
509 ret
510
511// Not a clue what this is for, except that unless the value is 0x10, the
512// strand context is saved (and presumably restored) incorrectly..
513//
514// In: $r15 value to set to (0x00/0x10 are used)
515//
516ctx_86c:
517 mov $r14 0x86c
518 shl b32 $r14 6
519 iowr I[$r14] $r15 // HUB(0x86c) = val
520 mov $r14 -0x75ec
521 sethi $r14 0x400000
522 call #nv_wr32 // ROP(0xa14) = val
523 mov $r14 -0x5794
524 sethi $r14 0x410000
525 call #nv_wr32 // GPC(0x86c) = val
526 ret
527
528// ctx_load - loads a channel's ctxctl data, and selects its vm
529//
530// In: $r2 channel address
531//
532ctx_load:
533 trace_set(T_CHAN)
534
535 // switch to channel, somewhat magic in parts..
536 mov $r10 12 // DONE_UNK12
537 call #wait_donez
538 mov $r1 0xa24
539 shl b32 $r1 6
540 iowr I[$r1 + 0x000] $r0 // 0x409a24
541 mov $r3 0xb00
542 shl b32 $r3 6
543 iowr I[$r3 + 0x100] $r2 // CHAN_NEXT
544 mov $r1 0xa0c
545 shl b32 $r1 6
546 mov $r4 7
547 iowr I[$r1 + 0x000] $r2 // MEM_CHAN
548 iowr I[$r1 + 0x100] $r4 // MEM_CMD
549 ctx_chan_wait_0:
550 iord $r4 I[$r1 + 0x100]
551 and $r4 0x1f
552 bra ne #ctx_chan_wait_0
553 iowr I[$r3 + 0x000] $r2 // CHAN_CUR
554
555 // load channel header, fetch PGRAPH context pointer
556 mov $xtargets $r0
557 bclr $r2 31
558 shl b32 $r2 4
559 add b32 $r2 2
560
561 trace_set(T_LCHAN)
562 mov $r1 0xa04
563 shl b32 $r1 6
564 iowr I[$r1 + 0x000] $r2 // MEM_BASE
565 mov $r1 0xa20
566 shl b32 $r1 6
567 mov $r2 0x0002
568 sethi $r2 0x80000000
569 iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
570 mov $r1 0x10 // chan + 0x0210
571 mov $r2 #xfer_data
572 sethi $r2 0x00020000 // 16 bytes
573 xdld $r1 $r2
574 xdwait
575 trace_clr(T_LCHAN)
576
577 // update current context
578 ld b32 $r1 D[$r0 + #xfer_data + 4]
579 shl b32 $r1 24
580 ld b32 $r2 D[$r0 + #xfer_data + 0]
581 shr b32 $r2 8
582 or $r1 $r2
583 st b32 D[$r0 + #ctx_current] $r1
584
585 // set transfer base to start of context, and fetch context header
586 trace_set(T_LCTXH)
587 mov $r2 0xa04
588 shl b32 $r2 6
589 iowr I[$r2 + 0x000] $r1 // MEM_BASE
590 mov $r2 1
591 mov $r1 0xa20
592 shl b32 $r1 6
593 iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
594 mov $r1 #chan_data
595 sethi $r1 0x00060000 // 256 bytes
596 xdld $r0 $r1
597 xdwait
598 trace_clr(T_LCTXH)
599
600 trace_clr(T_CHAN)
601 ret
602
603// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
604// the active channel for ctxctl, but not actually transfer
605// any context data. intended for use only during initial
606// context construction.
607//
608// In: $r2 channel address
609//
610ctx_chan:
611 call #ctx_load
612 mov $r10 12 // DONE_UNK12
613 call #wait_donez
614 mov $r1 0xa10
615 shl b32 $r1 6
616 mov $r2 5
617 iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???)
618 ctx_chan_wait:
619 iord $r2 I[$r1 + 0x000]
620 or $r2 $r2
621 bra ne #ctx_chan_wait
622 ret
623
624// Execute per-context state overrides list
625//
626// Only executed on the first load of a channel. Might want to look into
627// removing this and having the host directly modify the channel's context
628// to change this state... The nouveau DRM already builds this list as
629// it's definitely needed for NVIDIA's, so we may as well use it for now
630//
631// Input: $r1 mmio list length
632//
633ctx_mmio_exec:
634 // set transfer base to be the mmio list
635 ld b32 $r3 D[$r0 + #chan_mmio_address]
636 mov $r2 0xa04
637 shl b32 $r2 6
638 iowr I[$r2 + 0x000] $r3 // MEM_BASE
639
640 clear b32 $r3
641 ctx_mmio_loop:
642 // fetch next 256 bytes of mmio list if necessary
643 and $r4 $r3 0xff
644 bra ne #ctx_mmio_pull
645 mov $r5 #xfer_data
646 sethi $r5 0x00060000 // 256 bytes
647 xdld $r3 $r5
648 xdwait
649
650 // execute a single list entry
651 ctx_mmio_pull:
652 ld b32 $r14 D[$r4 + #xfer_data + 0x00]
653 ld b32 $r15 D[$r4 + #xfer_data + 0x04]
654 call #nv_wr32
655
656 // next!
657 add b32 $r3 8
658 sub b32 $r1 1
659 bra ne #ctx_mmio_loop
660
661 // set transfer base back to the current context
662 ctx_mmio_done:
663 ld b32 $r3 D[$r0 + #ctx_current]
664 iowr I[$r2 + 0x000] $r3 // MEM_BASE
665
666 // disable the mmio list now, we don't need/want to execute it again
667 st b32 D[$r0 + #chan_mmio_count] $r0
668 mov $r1 #chan_data
669 sethi $r1 0x00060000 // 256 bytes
670 xdst $r0 $r1
671 xdwait
672 ret
673
674// Transfer HUB context data between GPU and storage area
675//
676// In: $r2 channel address
677// $p1 clear on save, set on load
678// $p2 set if opposite direction done/will be done, so:
679// on save it means: "a load will follow this save"
680//     on load it means: "a save preceded this load"
681//
682ctx_xfer:
683 bra not $p1 #ctx_xfer_pre
684 bra $p2 #ctx_xfer_pre_load
685 ctx_xfer_pre:
686 mov $r15 0x10
687 call #ctx_86c
688 bra not $p1 #ctx_xfer_exec
689
690 ctx_xfer_pre_load:
691 mov $r15 2
692 call #ctx_4170s
693 call #ctx_4170w
694 call #ctx_redswitch
695 clear b32 $r15
696 call #ctx_4170s
697 call #ctx_load
698
699 // fetch context pointer, and initiate xfer on all GPCs
700 ctx_xfer_exec:
701 ld b32 $r1 D[$r0 + #ctx_current]
702 mov $r2 0x414
703 shl b32 $r2 6
704 iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
705 mov $r14 -0x5b00
706 sethi $r14 0x410000
707 mov b32 $r15 $r1
708 call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
709 add b32 $r14 4
710 xbit $r15 $flags $p1
711 xbit $r2 $flags $p2
712 shl b32 $r2 1
713 or $r15 $r2
714 call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
715
716 // strands
717 mov $r1 0x4afc
718 sethi $r1 0x20000
719 mov $r2 0xc
720 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
721 call #strand_wait
722 mov $r2 0x47fc
723 sethi $r2 0x20000
724 iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
725 xbit $r2 $flags $p1
726 add b32 $r2 3
727 iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
728
729 // mmio context
730 xbit $r10 $flags $p1 // direction
731 or $r10 6 // first, last
732 mov $r11 0 // base = 0
733 ld b32 $r12 D[$r0 + #hub_mmio_list_head]
734 ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
735 mov $r14 0 // not multi
736 call #mmctx_xfer
737
738 // wait for GPCs to all complete
739 mov $r10 8 // DONE_BAR
740 call #wait_doneo
741
742 // wait for strand xfer to complete
743 call #strand_wait
744
745 // post-op
746 bra $p1 #ctx_xfer_post
747 mov $r10 12 // DONE_UNK12
748 call #wait_donez
749 mov $r1 0xa10
750 shl b32 $r1 6
751 mov $r2 5
752 iowr I[$r1] $r2 // MEM_CMD
753 ctx_xfer_post_save_wait:
754 iord $r2 I[$r1]
755 or $r2 $r2
756 bra ne #ctx_xfer_post_save_wait
757
758 bra $p2 #ctx_xfer_done
759 ctx_xfer_post:
760 mov $r15 2
761 call #ctx_4170s
762 clear b32 $r15
763 call #ctx_86c
764 call #strand_post
765 call #ctx_4170w
766 clear b32 $r15
767 call #ctx_4170s
768
769 bra not $p1 #ctx_xfer_no_post_mmio
770 ld b32 $r1 D[$r0 + #chan_mmio_count]
771 or $r1 $r1
772 bra e #ctx_xfer_no_post_mmio
773 call #ctx_mmio_exec
774
775 ctx_xfer_no_post_mmio:
776
777 ctx_xfer_done:
778 ret
779
780.align 256
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
new file mode 100644
index 000000000000..decf0c60ca3b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -0,0 +1,857 @@
1uint32_t nve0_grhub_data[] = {
2/* 0x0000: gpc_count */
3 0x00000000,
4/* 0x0004: rop_count */
5 0x00000000,
6/* 0x0008: cmd_queue */
7 0x00000000,
8 0x00000000,
9 0x00000000,
10 0x00000000,
11 0x00000000,
12 0x00000000,
13 0x00000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x00000000,
19 0x00000000,
20 0x00000000,
21 0x00000000,
22 0x00000000,
23 0x00000000,
24 0x00000000,
25/* 0x0050: hub_mmio_list_head */
26 0x00000000,
27/* 0x0054: hub_mmio_list_tail */
28 0x00000000,
29/* 0x0058: ctx_current */
30 0x00000000,
31/* 0x005c: chipsets */
32 0x000000e4,
33 0x013c0070,
34 0x000000e7,
35 0x013c0070,
36 0x00000000,
37/* 0x0070: nve4_hub_mmio_head */
38 0x0417e91c,
39 0x04400204,
40 0x18404010,
41 0x204040a8,
42 0x184040d0,
43 0x004040f8,
44 0x08404130,
45 0x08404150,
46 0x00404164,
47 0x0c4041a0,
48 0x0c404200,
49 0x34404404,
50 0x0c404460,
51 0x00404480,
52 0x00404498,
53 0x0c404604,
54 0x0c404618,
55 0x0440462c,
56 0x00404640,
57 0x00404654,
58 0x00404660,
59 0x48404678,
60 0x084046c8,
61 0x08404700,
62 0x24404718,
63 0x04404744,
64 0x00404754,
65 0x00405800,
66 0x08405830,
67 0x00405854,
68 0x0c405870,
69 0x04405a00,
70 0x00405a18,
71 0x00405b00,
72 0x00405b10,
73 0x00406020,
74 0x0c406028,
75 0x044064a8,
76 0x044064b4,
77 0x2c4064c0,
78 0x004064fc,
79 0x00407040,
80 0x00407804,
81 0x1440780c,
82 0x004078bc,
83 0x18408000,
84 0x00408064,
85 0x08408800,
86 0x00408840,
87 0x08408900,
88 0x00408980,
89/* 0x013c: nve4_hub_mmio_tail */
90 0x00000000,
91 0x00000000,
92 0x00000000,
93 0x00000000,
94 0x00000000,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x00000000,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x00000000,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x00000000,
115 0x00000000,
116 0x00000000,
117 0x00000000,
118 0x00000000,
119 0x00000000,
120 0x00000000,
121 0x00000000,
122 0x00000000,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x00000000,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x00000000,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x00000000,
135 0x00000000,
136 0x00000000,
137 0x00000000,
138 0x00000000,
139/* 0x0200: chan_data */
140/* 0x0200: chan_mmio_count */
141 0x00000000,
142/* 0x0204: chan_mmio_address */
143 0x00000000,
144 0x00000000,
145 0x00000000,
146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174 0x00000000,
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195 0x00000000,
196 0x00000000,
197 0x00000000,
198 0x00000000,
199 0x00000000,
200 0x00000000,
201 0x00000000,
202 0x00000000,
203 0x00000000,
204 0x00000000,
205 0x00000000,
206/* 0x0300: xfer_data */
207 0x00000000,
208};
209
210uint32_t nve0_grhub_code[] = {
211 0x03090ef5,
212/* 0x0004: queue_put */
213 0x9800d898,
214 0x86f001d9,
215 0x0489b808,
216 0xf00c1bf4,
217 0x21f502f7,
218 0x00f802ec,
219/* 0x001c: queue_put_next */
220 0xb60798c4,
221 0x8dbb0384,
222 0x0880b600,
223 0x80008e80,
224 0x90b6018f,
225 0x0f94f001,
226 0xf801d980,
227/* 0x0039: queue_get */
228 0x0131f400,
229 0x9800d898,
230 0x89b801d9,
231 0x210bf404,
232 0xb60789c4,
233 0x9dbb0394,
234 0x0890b600,
235 0x98009e98,
236 0x80b6019f,
237 0x0f84f001,
238 0xf400d880,
239/* 0x0066: queue_get_done */
240 0x00f80132,
241/* 0x0068: nv_rd32 */
242 0x0728b7f1,
243 0xb906b4b6,
244 0xc9f002ec,
245 0x00bcd01f,
246/* 0x0078: nv_rd32_wait */
247 0xc800bccf,
248 0x1bf41fcc,
249 0x06a7f0fa,
250 0x010321f5,
251 0xf840bfcf,
252/* 0x008d: nv_wr32 */
253 0x28b7f100,
254 0x06b4b607,
255 0xb980bfd0,
256 0xc9f002ec,
257 0x1ec9f01f,
258/* 0x00a3: nv_wr32_wait */
259 0xcf00bcd0,
260 0xccc800bc,
261 0xfa1bf41f,
262/* 0x00ae: watchdog_reset */
263 0x87f100f8,
264 0x84b60430,
265 0x1ff9f006,
266 0xf8008fd0,
267/* 0x00bd: watchdog_clear */
268 0x3087f100,
269 0x0684b604,
270 0xf80080d0,
271/* 0x00c9: wait_donez */
272 0x3c87f100,
273 0x0684b608,
274 0x99f094bd,
275 0x0089d000,
276 0x081887f1,
277 0xd00684b6,
278/* 0x00e2: wait_done_wait_donez */
279 0x87f1008a,
280 0x84b60400,
281 0x0088cf06,
282 0xf4888aff,
283 0x87f1f31b,
284 0x84b6085c,
285 0xf094bd06,
286 0x89d00099,
287/* 0x0103: wait_doneo */
288 0xf100f800,
289 0xb6083c87,
290 0x94bd0684,
291 0xd00099f0,
292 0x87f10089,
293 0x84b60818,
294 0x008ad006,
295/* 0x011c: wait_done_wait_doneo */
296 0x040087f1,
297 0xcf0684b6,
298 0x8aff0088,
299 0xf30bf488,
300 0x085c87f1,
301 0xbd0684b6,
302 0x0099f094,
303 0xf80089d0,
304/* 0x013d: mmctx_size */
305/* 0x013f: nv_mmctx_size_loop */
306 0x9894bd00,
307 0x85b600e8,
308 0x0180b61a,
309 0xbb0284b6,
310 0xe0b60098,
311 0x04efb804,
312 0xb9eb1bf4,
313 0x00f8029f,
314/* 0x015c: mmctx_xfer */
315 0x083c87f1,
316 0xbd0684b6,
317 0x0199f094,
318 0xf10089d0,
319 0xb6071087,
320 0x94bd0684,
321 0xf405bbfd,
322 0x8bd0090b,
323 0x0099f000,
324/* 0x0180: mmctx_base_disabled */
325 0xf405eefd,
326 0x8ed00c0b,
327 0xc08fd080,
328/* 0x018f: mmctx_multi_disabled */
329 0xb70199f0,
330 0xc8010080,
331 0xb4b600ab,
332 0x0cb9f010,
333 0xb601aec8,
334 0xbefd11e4,
335 0x008bd005,
336/* 0x01a8: mmctx_exec_loop */
337/* 0x01a8: mmctx_wait_free */
338 0xf0008ecf,
339 0x0bf41fe4,
340 0x00ce98fa,
341 0xd005e9fd,
342 0xc0b6c08e,
343 0x04cdb804,
344 0xc8e81bf4,
345 0x1bf402ab,
346/* 0x01c9: mmctx_fini_wait */
347 0x008bcf18,
348 0xb01fb4f0,
349 0x1bf410b4,
350 0x02a7f0f7,
351 0xf4c921f4,
352/* 0x01de: mmctx_stop */
353 0xabc81b0e,
354 0x10b4b600,
355 0xf00cb9f0,
356 0x8bd012b9,
357/* 0x01ed: mmctx_stop_wait */
358 0x008bcf00,
359 0xf412bbc8,
360/* 0x01f6: mmctx_done */
361 0x87f1fa1b,
362 0x84b6085c,
363 0xf094bd06,
364 0x89d00199,
365/* 0x0207: strand_wait */
366 0xf900f800,
367 0x02a7f0a0,
368 0xfcc921f4,
369/* 0x0213: strand_pre */
370 0xf100f8a0,
371 0xf04afc87,
372 0x97f00283,
373 0x0089d00c,
374 0x020721f5,
375/* 0x0226: strand_post */
376 0x87f100f8,
377 0x83f04afc,
378 0x0d97f002,
379 0xf50089d0,
380 0xf8020721,
381/* 0x0239: strand_set */
382 0xfca7f100,
383 0x02a3f04f,
384 0x0500aba2,
385 0xd00fc7f0,
386 0xc7f000ac,
387 0x00bcd00b,
388 0x020721f5,
389 0xf000aed0,
390 0xbcd00ac7,
391 0x0721f500,
392/* 0x0263: strand_ctx_init */
393 0xf100f802,
394 0xb6083c87,
395 0x94bd0684,
396 0xd00399f0,
397 0x21f50089,
398 0xe7f00213,
399 0x3921f503,
400 0xfca7f102,
401 0x02a3f046,
402 0x0400aba0,
403 0xf040a0d0,
404 0xbcd001c7,
405 0x0721f500,
406 0x010c9202,
407 0xf000acd0,
408 0xbcd002c7,
409 0x0721f500,
410 0x2621f502,
411 0x8087f102,
412 0x0684b608,
413 0xb70089cf,
414 0x95220080,
415/* 0x02ba: ctx_init_strand_loop */
416 0x8ed008fe,
417 0x408ed000,
418 0xb6808acf,
419 0xa0b606a5,
420 0x00eabb01,
421 0xb60480b6,
422 0x1bf40192,
423 0x08e4b6e8,
424 0xf1f2efbc,
425 0xb6085c87,
426 0x94bd0684,
427 0xd00399f0,
428 0x00f80089,
429/* 0x02ec: error */
430 0xe7f1e0f9,
431 0xe4b60814,
432 0x00efd006,
433 0x0c1ce7f1,
434 0xf006e4b6,
435 0xefd001f7,
436 0xf8e0fc00,
437/* 0x0309: init */
438 0xfe04bd00,
439 0x07fe0004,
440 0x0017f100,
441 0x0227f012,
442 0xf10012d0,
443 0xfe05b917,
444 0x17f10010,
445 0x10d00400,
446 0x0437f1c0,
447 0x0634b604,
448 0x200327f1,
449 0xf10032d0,
450 0xd0200427,
451 0x27f10132,
452 0x32d0200b,
453 0x0c27f102,
454 0x0732d020,
455 0x0c2427f1,
456 0xb90624b6,
457 0x23d00003,
458 0x0427f100,
459 0x0023f087,
460 0xb70012d0,
461 0xf0010012,
462 0x12d00427,
463 0x1031f400,
464 0x9604e7f1,
465 0xf440e3f0,
466 0xf1c76821,
467 0x01018090,
468 0x801ff4f0,
469 0x17f0000f,
470 0x041fbb01,
471 0xf10112b6,
472 0xb6040c27,
473 0x21d00624,
474 0x4021d000,
475 0x080027f1,
476 0xcf0624b6,
477 0xf7f00022,
478/* 0x03a9: init_find_chipset */
479 0x08f0b654,
480 0xb800f398,
481 0x0bf40432,
482 0x0034b00b,
483 0xf8f11bf4,
484/* 0x03bd: init_context */
485 0x0017f100,
486 0x02fe5801,
487 0xf003ff58,
488 0x0e8000e3,
489 0x150f8014,
490 0x013d21f5,
491 0x070037f1,
492 0x950634b6,
493 0x34d00814,
494 0x4034d000,
495 0x130030b7,
496 0xb6001fbb,
497 0x3fd002f5,
498 0x0815b600,
499 0xb60110b6,
500 0x1fb90814,
501 0x6321f502,
502 0x001fbb02,
503 0xf1000398,
504 0xf0200047,
505/* 0x040e: init_gpc */
506 0x4ea05043,
507 0x1fb90804,
508 0x8d21f402,
509 0x08004ea0,
510 0xf4022fb9,
511 0x4ea08d21,
512 0xf4bd010c,
513 0xa08d21f4,
514 0xf401044e,
515 0x4ea08d21,
516 0xf7f00100,
517 0x8d21f402,
518 0x08004ea0,
519/* 0x0440: init_gpc_wait */
520 0xc86821f4,
521 0x0bf41fff,
522 0x044ea0fa,
523 0x6821f408,
524 0xb7001fbb,
525 0xb6800040,
526 0x1bf40132,
527 0x0027f1b4,
528 0x0624b608,
529 0xb74021d0,
530 0xbd080020,
531 0x1f19f014,
532/* 0x0473: main */
533 0xf40021d0,
534 0x28f40031,
535 0x08d7f000,
536 0xf43921f4,
537 0xe4b1f401,
538 0x1bf54001,
539 0x87f100d1,
540 0x84b6083c,
541 0xf094bd06,
542 0x89d00499,
543 0x0017f100,
544 0x0614b60b,
545 0xcf4012cf,
546 0x13c80011,
547 0x7e0bf41f,
548 0xf41f23c8,
549 0x20f95a0b,
550 0xf10212b9,
551 0xb6083c87,
552 0x94bd0684,
553 0xd00799f0,
554 0x32f40089,
555 0x0231f401,
556 0x07fb21f5,
557 0x085c87f1,
558 0xbd0684b6,
559 0x0799f094,
560 0xfc0089d0,
561 0x3c87f120,
562 0x0684b608,
563 0x99f094bd,
564 0x0089d006,
565 0xf50131f4,
566 0xf107fb21,
567 0xb6085c87,
568 0x94bd0684,
569 0xd00699f0,
570 0x0ef40089,
571/* 0x0509: chsw_prev_no_next */
572 0xb920f931,
573 0x32f40212,
574 0x0232f401,
575 0x07fb21f5,
576 0x17f120fc,
577 0x14b60b00,
578 0x0012d006,
579/* 0x0527: chsw_no_prev */
580 0xc8130ef4,
581 0x0bf41f23,
582 0x0131f40d,
583 0xf50232f4,
584/* 0x0537: chsw_done */
585 0xf107fb21,
586 0xb60b0c17,
587 0x27f00614,
588 0x0012d001,
589 0x085c87f1,
590 0xbd0684b6,
591 0x0499f094,
592 0xf50089d0,
593/* 0x0557: main_not_ctx_switch */
594 0xb0ff200e,
595 0x1bf401e4,
596 0x02f2b90d,
597 0x078f21f5,
598/* 0x0567: main_not_ctx_chan */
599 0xb0420ef4,
600 0x1bf402e4,
601 0x3c87f12e,
602 0x0684b608,
603 0x99f094bd,
604 0x0089d007,
605 0xf40132f4,
606 0x21f50232,
607 0x87f107fb,
608 0x84b6085c,
609 0xf094bd06,
610 0x89d00799,
611 0x110ef400,
612/* 0x0598: main_not_ctx_save */
613 0xf010ef94,
614 0x21f501f5,
615 0x0ef502ec,
616/* 0x05a6: main_done */
617 0x17f1fed1,
618 0x14b60820,
619 0xf024bd06,
620 0x12d01f29,
621 0xbe0ef500,
622/* 0x05b9: ih */
623 0xfe80f9fe,
624 0x80f90188,
625 0xa0f990f9,
626 0xd0f9b0f9,
627 0xf0f9e0f9,
628 0xc4800acf,
629 0x0bf404ab,
630 0x00b7f11d,
631 0x08d7f019,
632 0xcf40becf,
633 0x21f400bf,
634 0x00b0b704,
635 0x01e7f004,
636/* 0x05ef: ih_no_fifo */
637 0xe400bed0,
638 0xf40100ab,
639 0xd7f00d0b,
640 0x01e7f108,
641 0x0421f440,
642/* 0x0600: ih_no_ctxsw */
643 0x0104b7f1,
644 0xabffb0bd,
645 0x0d0bf4b4,
646 0x0c1ca7f1,
647 0xd006a4b6,
648/* 0x0616: ih_no_other */
649 0x0ad000ab,
650 0xfcf0fc40,
651 0xfcd0fce0,
652 0xfca0fcb0,
653 0xfe80fc90,
654 0x80fc0088,
655 0xf80032f4,
656/* 0x0631: ctx_4170s */
657 0x70e7f101,
658 0x40e3f041,
659 0xf410f5f0,
660 0x00f88d21,
661/* 0x0640: ctx_4170w */
662 0x4170e7f1,
663 0xf440e3f0,
664 0xf4f06821,
665 0xf31bf410,
666/* 0x0652: ctx_redswitch */
667 0xe7f100f8,
668 0xe4b60614,
669 0x70f7f106,
670 0x00efd002,
671/* 0x0663: ctx_redswitch_delay */
672 0xb608f7f0,
673 0x1bf401f2,
674 0x70f7f1fd,
675 0x00efd007,
676/* 0x0672: ctx_86c */
677 0xe7f100f8,
678 0xe4b6086c,
679 0x00efd006,
680 0x8a14e7f1,
681 0xf440e3f0,
682 0xe7f18d21,
683 0xe3f0a86c,
684 0x8d21f441,
685/* 0x0692: ctx_load */
686 0x87f100f8,
687 0x84b6083c,
688 0xf094bd06,
689 0x89d00599,
690 0x0ca7f000,
691 0xf1c921f4,
692 0xb60a2417,
693 0x10d00614,
694 0x0037f100,
695 0x0634b60b,
696 0xf14032d0,
697 0xb60a0c17,
698 0x47f00614,
699 0x0012d007,
700/* 0x06cb: ctx_chan_wait_0 */
701 0xcf4014d0,
702 0x44f04014,
703 0xfa1bf41f,
704 0xfe0032d0,
705 0x2af0000b,
706 0x0424b61f,
707 0xf10220b6,
708 0xb6083c87,
709 0x94bd0684,
710 0xd00899f0,
711 0x17f10089,
712 0x14b60a04,
713 0x0012d006,
714 0x0a2017f1,
715 0xf00614b6,
716 0x23f10227,
717 0x12d08000,
718 0x1017f000,
719 0x030027f1,
720 0xfa0223f0,
721 0x03f80512,
722 0x085c87f1,
723 0xbd0684b6,
724 0x0899f094,
725 0x980089d0,
726 0x14b6c101,
727 0xc0029818,
728 0xfd0825b6,
729 0x01800512,
730 0x3c87f116,
731 0x0684b608,
732 0x99f094bd,
733 0x0089d009,
734 0x0a0427f1,
735 0xd00624b6,
736 0x27f00021,
737 0x2017f101,
738 0x0614b60a,
739 0xf10012d0,
740 0xf0020017,
741 0x01fa0613,
742 0xf103f805,
743 0xb6085c87,
744 0x94bd0684,
745 0xd00999f0,
746 0x87f10089,
747 0x84b6085c,
748 0xf094bd06,
749 0x89d00599,
750/* 0x078f: ctx_chan */
751 0xf500f800,
752 0xf0069221,
753 0x21f40ca7,
754 0x1017f1c9,
755 0x0614b60a,
756 0xd00527f0,
757/* 0x07a6: ctx_chan_wait */
758 0x12cf0012,
759 0x0522fd00,
760 0xf8fa1bf4,
761/* 0x07b1: ctx_mmio_exec */
762 0x81039800,
763 0x0a0427f1,
764 0xd00624b6,
765 0x34bd0023,
766/* 0x07c0: ctx_mmio_loop */
767 0xf4ff34c4,
768 0x57f10f1b,
769 0x53f00300,
770 0x0535fa06,
771/* 0x07d2: ctx_mmio_pull */
772 0x4e9803f8,
773 0xc14f98c0,
774 0xb68d21f4,
775 0x12b60830,
776 0xdf1bf401,
777/* 0x07e4: ctx_mmio_done */
778 0xd0160398,
779 0x00800023,
780 0x0017f180,
781 0x0613f002,
782 0xf80601fa,
783/* 0x07fb: ctx_xfer */
784 0xf400f803,
785 0x02f40611,
786/* 0x0801: ctx_xfer_pre */
787 0x10f7f00d,
788 0x067221f5,
789/* 0x080b: ctx_xfer_pre_load */
790 0xf01c11f4,
791 0x21f502f7,
792 0x21f50631,
793 0x21f50640,
794 0xf4bd0652,
795 0x063121f5,
796 0x069221f5,
797/* 0x0824: ctx_xfer_exec */
798 0xf1160198,
799 0xb6041427,
800 0x20d00624,
801 0x00e7f100,
802 0x41e3f0a5,
803 0xf4021fb9,
804 0xe0b68d21,
805 0x01fcf004,
806 0xb6022cf0,
807 0xf2fd0124,
808 0x8d21f405,
809 0x4afc17f1,
810 0xf00213f0,
811 0x12d00c27,
812 0x0721f500,
813 0xfc27f102,
814 0x0223f047,
815 0xf00020d0,
816 0x20b6012c,
817 0x0012d003,
818 0xf001acf0,
819 0xb7f006a5,
820 0x140c9800,
821 0xf0150d98,
822 0x21f500e7,
823 0xa7f0015c,
824 0x0321f508,
825 0x0721f501,
826 0x2201f402,
827 0xf40ca7f0,
828 0x17f1c921,
829 0x14b60a10,
830 0x0527f006,
831/* 0x08ab: ctx_xfer_post_save_wait */
832 0xcf0012d0,
833 0x22fd0012,
834 0xfa1bf405,
835/* 0x08b7: ctx_xfer_post */
836 0xf02e02f4,
837 0x21f502f7,
838 0xf4bd0631,
839 0x067221f5,
840 0x022621f5,
841 0x064021f5,
842 0x21f5f4bd,
843 0x11f40631,
844 0x80019810,
845 0xf40511fd,
846 0x21f5070b,
847/* 0x08e2: ctx_xfer_no_post_mmio */
848/* 0x08e2: ctx_xfer_done */
849 0x00f807b1,
850 0x00000000,
851 0x00000000,
852 0x00000000,
853 0x00000000,
854 0x00000000,
855 0x00000000,
856 0x00000000,
857};
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc
index e6b228844a32..e6b228844a32 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nvc0.fuc
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc
new file mode 100644
index 000000000000..f16a5d53319d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/nve0.fuc
@@ -0,0 +1,400 @@
1/* fuc microcode util functions for nve0 PGRAPH
2 *
3 * Copyright 2011 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Ben Skeggs
24 */
25
26define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
27define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
28
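As a sanity check on the mmctx_data() encoding: each list entry is a single word holding the register address in bits 25:0 and the word count minus one in bits 31:26, which is how the hub mmio lists end up in the generated .fuc.h dumps (mmctx_data(0x17e91c, 2) -> 0x0417e91c, mmctx_data(0x404404, 14) -> 0x34404404). A small C sketch of the same packing, for illustration:

#include <stdint.h>

/* mmctx_data(addr, count) packs to ((count - 1) << 26) | addr */
static inline uint32_t mmctx_data(uint32_t addr, uint32_t count)
{
	return ((count - 1) << 26) | addr;
}

static inline uint32_t mmctx_addr(uint32_t word)
{
	return word & 0x03ffffff;            /* bits 25:0 */
}

static inline uint32_t mmctx_count(uint32_t word)
{
	return (word >> 26) + 1;             /* bits 31:26, biased by one */
}
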
29ifdef(`include_code', `
30// Error codes
31define(`E_BAD_COMMAND', 0x01)
32define(`E_CMD_OVERFLOW', 0x02)
33
34// Util macros to help with debugging ucode hangs etc
35define(`T_WAIT', 0)
36define(`T_MMCTX', 1)
37define(`T_STRWAIT', 2)
38define(`T_STRINIT', 3)
39define(`T_AUTO', 4)
40define(`T_CHAN', 5)
41define(`T_LOAD', 6)
42define(`T_SAVE', 7)
43define(`T_LCHAN', 8)
44define(`T_LCTXH', 9)
45
46define(`trace_set', `
47 mov $r8 0x83c
48 shl b32 $r8 6
49 clear b32 $r9
50 bset $r9 $1
51 iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
52')
53
54define(`trace_clr', `
55 mov $r8 0x85c
56 shl b32 $r8 6
57 clear b32 $r9
58 bset $r9 $1
59 iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
60')
61
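trace_set()/trace_clr() expand to a write of a single T_* bit into CC_SCRATCH[7] through its set (0x83c) and clear (0x85c) aliases, so the host can read back which phases the ucode is currently in. A hedged C rendering; iowr() here is an assumed stand-in for the fuc's I[] indirect I/O, not a real kernel helper:

#include <stdint.h>

extern void iowr(uint32_t addr, uint32_t val);   /* assumed I[] accessor */

#define CC_SCRATCH7_SET (0x83c << 6)   /* write 1s to set bits   */
#define CC_SCRATCH7_CLR (0x85c << 6)   /* write 1s to clear bits */

static void trace_set(int bit) { iowr(CC_SCRATCH7_SET, 1u << bit); }
static void trace_clr(int bit) { iowr(CC_SCRATCH7_CLR, 1u << bit); }
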
62// queue_put - add request to queue
63//
64// In : $r13 queue pointer
65// $r14 command
66// $r15 data
67//
68queue_put:
69 // make sure we have space..
70 ld b32 $r8 D[$r13 + 0x0] // GET
71 ld b32 $r9 D[$r13 + 0x4] // PUT
72 xor $r8 8
73 cmpu b32 $r8 $r9
74 bra ne #queue_put_next
75 mov $r15 E_CMD_OVERFLOW
76 call #error
77 ret
78
79 // store cmd/data on queue
80 queue_put_next:
81 and $r8 $r9 7
82 shl b32 $r8 3
83 add b32 $r8 $r13
84 add b32 $r8 8
85 st b32 D[$r8 + 0x0] $r14
86 st b32 D[$r8 + 0x4] $r15
87
88 // update PUT
89 add b32 $r9 1
90 and $r9 0xf
91 st b32 D[$r13 + 0x4] $r9
92 ret
93
94// queue_get - fetch request from queue
95//
96// In : $r13 queue pointer
97//
98// Out: $p1 clear on success (data available)
99// $r14 command
100// $r15 data
101//
102queue_get:
103 bset $flags $p1
104 ld b32 $r8 D[$r13 + 0x0] // GET
105 ld b32 $r9 D[$r13 + 0x4] // PUT
106 cmpu b32 $r8 $r9
107 bra e #queue_get_done
108 // fetch first cmd/data pair
109 and $r9 $r8 7
110 shl b32 $r9 3
111 add b32 $r9 $r13
112 add b32 $r9 8
113 ld b32 $r14 D[$r9 + 0x0]
114 ld b32 $r15 D[$r9 + 0x4]
115
116 // update GET
117 add b32 $r8 1
118 and $r8 0xf
119 st b32 D[$r13 + 0x0] $r8
120 bclr $flags $p1
121queue_get_done:
122 ret
123
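For reference, the GET/PUT ring protocol used by queue_put/queue_get above, rendered as a minimal C sketch (hypothetical types and names, not driver code). Indices run 0..15 over an 8-entry queue, so the ring is full when PUT == GET ^ 8 and empty when GET == PUT:

	struct fuc_queue {
		u32 get;			/* D[$r13 + 0x0] */
		u32 put;			/* D[$r13 + 0x4] */
		struct { u32 cmd, data; } ent[8];
	};

	static int fuc_queue_put(struct fuc_queue *q, u32 cmd, u32 data)
	{
		if (q->put == (q->get ^ 8))
			return -1;		/* full: E_CMD_OVERFLOW above */
		q->ent[q->put & 7].cmd  = cmd;
		q->ent[q->put & 7].data = data;
		q->put = (q->put + 1) & 0xf;	/* wrap index at 16 */
		return 0;
	}

	static int fuc_queue_get(struct fuc_queue *q, u32 *cmd, u32 *data)
	{
		if (q->get == q->put)
			return -1;		/* empty: $p1 stays set above */
		*cmd  = q->ent[q->get & 7].cmd;
		*data = q->ent[q->get & 7].data;
		q->get = (q->get + 1) & 0xf;
		return 0;
	}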
124// nv_rd32 - read 32-bit value from nv register
125//
126// In : $r14 register
127// Out: $r15 value
128//
129nv_rd32:
130 mov $r11 0x728
131 shl b32 $r11 6
132 mov b32 $r12 $r14
133 bset $r12 31 // MMIO_CTRL_PENDING
134 iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
135 nv_rd32_wait:
136 iord $r12 I[$r11 + 0x000]
137 xbit $r12 $r12 31
138 bra ne #nv_rd32_wait
139 mov $r10 6 // DONE_MMIO_RD
140 call #wait_doneo
141 iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
142 ret
143
144// nv_wr32 - write 32-bit value to nv register
145//
146// In : $r14 register
147// $r15 value
148//
149nv_wr32:
150 mov $r11 0x728
151 shl b32 $r11 6
152 iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL
153 mov b32 $r12 $r14
154 bset $r12 31 // MMIO_CTRL_PENDING
155 bset $r12 30 // MMIO_CTRL_WRITE
156 iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
157 nv_wr32_wait:
158 iord $r12 I[$r11 + 0x000]
159 xbit $r12 $r12 31
160 bra ne #nv_wr32_wait
161 ret
162
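The same MMIO handshake in host-style C, for readability (iord/iowr mirror the falcon I/O instructions, and the register macros are stand-ins named after the comments above; illustrative only):

	static u32 fuc_nv_rd32(u32 reg)
	{
		iowr(MMIO_CTRL, reg | MMIO_CTRL_PENDING);
		while (iord(MMIO_CTRL) & MMIO_CTRL_PENDING)
			;			/* request accepted */
		wait_doneo(DONE_MMIO_RD);	/* read data available */
		return iord(MMIO_RDVAL);
	}

	static void fuc_nv_wr32(u32 reg, u32 val)
	{
		iowr(MMIO_WRVAL, val);
		iowr(MMIO_CTRL, reg | MMIO_CTRL_PENDING | MMIO_CTRL_WRITE);
		while (iord(MMIO_CTRL) & MMIO_CTRL_PENDING)
			;			/* write has been issued */
	}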
163// (re)set watchdog timer
164//
165// In : $r15 timeout
166//
167watchdog_reset:
168 mov $r8 0x430
169 shl b32 $r8 6
170 bset $r15 31
171 iowr I[$r8 + 0x000] $r15
172 ret
173
174// clear watchdog timer
175watchdog_clear:
176 mov $r8 0x430
177 shl b32 $r8 6
178 iowr I[$r8 + 0x000] $r0
179 ret
180
181// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
182//
183// In : $r10 bit to wait on
184//
185define(`wait_done', `
186$1:
187 trace_set(T_WAIT);
188 mov $r8 0x818
189 shl b32 $r8 6
190 iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait bit
191 wait_done_$1:
192 mov $r8 0x400
193 shl b32 $r8 6
194 iord $r8 I[$r8 + 0x000] // DONE
195 xbit $r8 $r8 $r10
196 bra $2 #wait_done_$1
197 trace_clr(T_WAIT)
198 ret
199')
200wait_done(wait_donez, ne)
201wait_done(wait_doneo, e)
202
203// mmctx_size - determine size of a mmio list transfer
204//
205// In : $r14 mmio list head
206// $r15 mmio list tail
207// Out: $r15 transfer size (in bytes)
208//
209mmctx_size:
210 clear b32 $r9
211 nv_mmctx_size_loop:
212 ld b32 $r8 D[$r14]
213 shr b32 $r8 26
214 add b32 $r8 1
215 shl b32 $r8 2
216 add b32 $r9 $r8
217 add b32 $r14 4
218 cmpu b32 $r14 $r15
219 bra ne #nv_mmctx_size_loop
220 mov b32 $r15 $r9
221 ret
222
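mmctx_size simply walks the list built from mmctx_data entries, which pack (count - 1) into bits 31:26 and the register address below; in C (illustrative sketch):

	static u32 mmctx_size(const u32 *head, const u32 *tail)
	{
		u32 bytes = 0;

		/* each run transfers 4 bytes per register */
		while (head != tail)
			bytes += ((*head++ >> 26) + 1) * 4;
		return bytes;
	}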
223// mmctx_xfer - execute a list of mmio transfers
224//
225// In : $r10 flags
226// bit 0: direction (0 = save, 1 = load)
227// bit 1: set if first transfer
228// bit 2: set if last transfer
229// $r11 base
230// $r12 mmio list head
231// $r13 mmio list tail
232// $r14 multi_stride
233// $r15 multi_mask
234//
235mmctx_xfer:
236 trace_set(T_MMCTX)
237 mov $r8 0x710
238 shl b32 $r8 6
239 clear b32 $r9
240 or $r11 $r11
241 bra e #mmctx_base_disabled
242 iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
243 bset $r9 0 // BASE_EN
244 mmctx_base_disabled:
245 or $r14 $r14
246 bra e #mmctx_multi_disabled
247 iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
248 iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
249 bset $r9 1 // MULTI_EN
250 mmctx_multi_disabled:
251 add b32 $r8 0x100
252
253 xbit $r11 $r10 0
254 shl b32 $r11 16 // DIR
255 bset $r11 12 // QLIMIT = 0x10
256 xbit $r14 $r10 1
257 shl b32 $r14 17
258 or $r11 $r14 // START_TRIGGER
259 iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
260
261 // loop over the mmio list, and send requests to the hw
262 mmctx_exec_loop:
263 // wait for space in mmctx queue
264 mmctx_wait_free:
265 iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
266 and $r14 0x1f
267 bra e #mmctx_wait_free
268
269 // queue up an entry
270 ld b32 $r14 D[$r12]
271 or $r14 $r9
272 iowr I[$r8 + 0x300] $r14
273 add b32 $r12 4
274 cmpu b32 $r12 $r13
275 bra ne #mmctx_exec_loop
276
277 xbit $r11 $r10 2
278 bra ne #mmctx_stop
279 // wait for queue to empty
280 mmctx_fini_wait:
281 iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
282 and $r11 0x1f
283 cmpu b32 $r11 0x10
284 bra ne #mmctx_fini_wait
285 mov $r10 2 // DONE_MMCTX
286 call #wait_donez
287 bra #mmctx_done
288 mmctx_stop:
289 xbit $r11 $r10 0
290 shl b32 $r11 16 // DIR
291 bset $r11 12 // QLIMIT = 0x10
292 bset $r11 18 // STOP_TRIGGER
293 iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
294 mmctx_stop_wait:
295 // wait for STOP_TRIGGER to clear
296 iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
297 xbit $r11 $r11 18
298 bra ne #mmctx_stop_wait
299 mmctx_done:
300 trace_clr(T_MMCTX)
301 ret
302
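For reference, the MMCTX_CTRL fields poked by mmctx_xfer above (names from the inline comments, layout inferred from the code):

	bits 4:0  free entries in the request queue (0x10 == queue idle)
	bit  12   QLIMIT (queue limit = 0x10)
	bit  16   DIR (0 = save, 1 = load)
	bit  17   START_TRIGGER (set for the first transfer)
	bit  18   STOP_TRIGGER (set to terminate after the last transfer)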
303// Wait for DONE_STRAND
304//
305strand_wait:
306 push $r10
307 mov $r10 2
308 call #wait_donez
309 pop $r10
310 ret
311
312// unknown - call before issuing strand commands
313//
314strand_pre:
315 mov $r8 0x4afc
316 sethi $r8 0x20000
317 mov $r9 0xc
318 iowr I[$r8] $r9
319 call #strand_wait
320 ret
321
322// unknown - call after issuing strand commands
323//
324strand_post:
325 mov $r8 0x4afc
326 sethi $r8 0x20000
327 mov $r9 0xd
328 iowr I[$r8] $r9
329 call #strand_wait
330 ret
331
332// Selects strand set?!
333//
334// In: $r14 id
335//
336strand_set:
337 mov $r10 0x4ffc
338 sethi $r10 0x20000
339 sub b32 $r11 $r10 0x500
340 mov $r12 0xf
341 iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
342 mov $r12 0xb
343 iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
344 call #strand_wait
345 iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
346 mov $r12 0xa
347 iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
348 call #strand_wait
349 ret
350
351// Initialise strand context data
352//
353// In : $r15 context base
354// Out: $r15 context size (in bytes)
355//
356// Strandset(?) 3 hardcoded currently
357//
358strand_ctx_init:
359 trace_set(T_STRINIT)
360 call #strand_pre
361 mov $r14 3
362 call #strand_set
363 mov $r10 0x46fc
364 sethi $r10 0x20000
365 add b32 $r11 $r10 0x400
366 iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
367 mov $r12 1
368 iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
369 call #strand_wait
370 sub b32 $r12 $r0 1
371 iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
372 mov $r12 2
373 iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
374 call #strand_wait
375 call #strand_post
376
377 // read the size of each strand and poke the context offset of
378 // each into STRAND_{SAVE,LOAD}_SWBASE now, so we don't need to
379 // worry about it later.
380 mov $r8 0x880
381 shl b32 $r8 6
382 iord $r9 I[$r8 + 0x000] // STRANDS
383 add b32 $r8 0x2200
384 shr b32 $r14 $r15 8
385 ctx_init_strand_loop:
386 iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE
387 iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE
388 iord $r10 I[$r8 + 0x200] // STRAND_SIZE
389 shr b32 $r10 6
390 add b32 $r10 1
391 add b32 $r14 $r10
392 add b32 $r8 4
393 sub b32 $r9 1
394 bra ne #ctx_init_strand_loop
395
396 shl b32 $r14 8
397 sub b32 $r15 $r14 $r15
398 trace_clr(T_STRINIT)
399 ret
400')
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
new file mode 100644
index 000000000000..618528248457
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -0,0 +1,1387 @@
1/*
2 * Copyright 2007 Stephane Marchesin
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/handle.h>
28#include <core/namedb.h>
29
30#include <subdev/fb.h>
31#include <subdev/instmem.h>
32#include <subdev/timer.h>
33
34#include <engine/fifo.h>
35#include <engine/graph.h>
36
37#include "regs.h"
38
39static u32
40nv04_graph_ctx_regs[] = {
41 0x0040053c,
42 0x00400544,
43 0x00400540,
44 0x00400548,
45 NV04_PGRAPH_CTX_SWITCH1,
46 NV04_PGRAPH_CTX_SWITCH2,
47 NV04_PGRAPH_CTX_SWITCH3,
48 NV04_PGRAPH_CTX_SWITCH4,
49 NV04_PGRAPH_CTX_CACHE1,
50 NV04_PGRAPH_CTX_CACHE2,
51 NV04_PGRAPH_CTX_CACHE3,
52 NV04_PGRAPH_CTX_CACHE4,
53 0x00400184,
54 0x004001a4,
55 0x004001c4,
56 0x004001e4,
57 0x00400188,
58 0x004001a8,
59 0x004001c8,
60 0x004001e8,
61 0x0040018c,
62 0x004001ac,
63 0x004001cc,
64 0x004001ec,
65 0x00400190,
66 0x004001b0,
67 0x004001d0,
68 0x004001f0,
69 0x00400194,
70 0x004001b4,
71 0x004001d4,
72 0x004001f4,
73 0x00400198,
74 0x004001b8,
75 0x004001d8,
76 0x004001f8,
77 0x0040019c,
78 0x004001bc,
79 0x004001dc,
80 0x004001fc,
81 0x00400174,
82 NV04_PGRAPH_DMA_START_0,
83 NV04_PGRAPH_DMA_START_1,
84 NV04_PGRAPH_DMA_LENGTH,
85 NV04_PGRAPH_DMA_MISC,
86 NV04_PGRAPH_DMA_PITCH,
87 NV04_PGRAPH_BOFFSET0,
88 NV04_PGRAPH_BBASE0,
89 NV04_PGRAPH_BLIMIT0,
90 NV04_PGRAPH_BOFFSET1,
91 NV04_PGRAPH_BBASE1,
92 NV04_PGRAPH_BLIMIT1,
93 NV04_PGRAPH_BOFFSET2,
94 NV04_PGRAPH_BBASE2,
95 NV04_PGRAPH_BLIMIT2,
96 NV04_PGRAPH_BOFFSET3,
97 NV04_PGRAPH_BBASE3,
98 NV04_PGRAPH_BLIMIT3,
99 NV04_PGRAPH_BOFFSET4,
100 NV04_PGRAPH_BBASE4,
101 NV04_PGRAPH_BLIMIT4,
102 NV04_PGRAPH_BOFFSET5,
103 NV04_PGRAPH_BBASE5,
104 NV04_PGRAPH_BLIMIT5,
105 NV04_PGRAPH_BPITCH0,
106 NV04_PGRAPH_BPITCH1,
107 NV04_PGRAPH_BPITCH2,
108 NV04_PGRAPH_BPITCH3,
109 NV04_PGRAPH_BPITCH4,
110 NV04_PGRAPH_SURFACE,
111 NV04_PGRAPH_STATE,
112 NV04_PGRAPH_BSWIZZLE2,
113 NV04_PGRAPH_BSWIZZLE5,
114 NV04_PGRAPH_BPIXEL,
115 NV04_PGRAPH_NOTIFY,
116 NV04_PGRAPH_PATT_COLOR0,
117 NV04_PGRAPH_PATT_COLOR1,
118 NV04_PGRAPH_PATT_COLORRAM+0x00,
119 NV04_PGRAPH_PATT_COLORRAM+0x04,
120 NV04_PGRAPH_PATT_COLORRAM+0x08,
121 NV04_PGRAPH_PATT_COLORRAM+0x0c,
122 NV04_PGRAPH_PATT_COLORRAM+0x10,
123 NV04_PGRAPH_PATT_COLORRAM+0x14,
124 NV04_PGRAPH_PATT_COLORRAM+0x18,
125 NV04_PGRAPH_PATT_COLORRAM+0x1c,
126 NV04_PGRAPH_PATT_COLORRAM+0x20,
127 NV04_PGRAPH_PATT_COLORRAM+0x24,
128 NV04_PGRAPH_PATT_COLORRAM+0x28,
129 NV04_PGRAPH_PATT_COLORRAM+0x2c,
130 NV04_PGRAPH_PATT_COLORRAM+0x30,
131 NV04_PGRAPH_PATT_COLORRAM+0x34,
132 NV04_PGRAPH_PATT_COLORRAM+0x38,
133 NV04_PGRAPH_PATT_COLORRAM+0x3c,
134 NV04_PGRAPH_PATT_COLORRAM+0x40,
135 NV04_PGRAPH_PATT_COLORRAM+0x44,
136 NV04_PGRAPH_PATT_COLORRAM+0x48,
137 NV04_PGRAPH_PATT_COLORRAM+0x4c,
138 NV04_PGRAPH_PATT_COLORRAM+0x50,
139 NV04_PGRAPH_PATT_COLORRAM+0x54,
140 NV04_PGRAPH_PATT_COLORRAM+0x58,
141 NV04_PGRAPH_PATT_COLORRAM+0x5c,
142 NV04_PGRAPH_PATT_COLORRAM+0x60,
143 NV04_PGRAPH_PATT_COLORRAM+0x64,
144 NV04_PGRAPH_PATT_COLORRAM+0x68,
145 NV04_PGRAPH_PATT_COLORRAM+0x6c,
146 NV04_PGRAPH_PATT_COLORRAM+0x70,
147 NV04_PGRAPH_PATT_COLORRAM+0x74,
148 NV04_PGRAPH_PATT_COLORRAM+0x78,
149 NV04_PGRAPH_PATT_COLORRAM+0x7c,
150 NV04_PGRAPH_PATT_COLORRAM+0x80,
151 NV04_PGRAPH_PATT_COLORRAM+0x84,
152 NV04_PGRAPH_PATT_COLORRAM+0x88,
153 NV04_PGRAPH_PATT_COLORRAM+0x8c,
154 NV04_PGRAPH_PATT_COLORRAM+0x90,
155 NV04_PGRAPH_PATT_COLORRAM+0x94,
156 NV04_PGRAPH_PATT_COLORRAM+0x98,
157 NV04_PGRAPH_PATT_COLORRAM+0x9c,
158 NV04_PGRAPH_PATT_COLORRAM+0xa0,
159 NV04_PGRAPH_PATT_COLORRAM+0xa4,
160 NV04_PGRAPH_PATT_COLORRAM+0xa8,
161 NV04_PGRAPH_PATT_COLORRAM+0xac,
162 NV04_PGRAPH_PATT_COLORRAM+0xb0,
163 NV04_PGRAPH_PATT_COLORRAM+0xb4,
164 NV04_PGRAPH_PATT_COLORRAM+0xb8,
165 NV04_PGRAPH_PATT_COLORRAM+0xbc,
166 NV04_PGRAPH_PATT_COLORRAM+0xc0,
167 NV04_PGRAPH_PATT_COLORRAM+0xc4,
168 NV04_PGRAPH_PATT_COLORRAM+0xc8,
169 NV04_PGRAPH_PATT_COLORRAM+0xcc,
170 NV04_PGRAPH_PATT_COLORRAM+0xd0,
171 NV04_PGRAPH_PATT_COLORRAM+0xd4,
172 NV04_PGRAPH_PATT_COLORRAM+0xd8,
173 NV04_PGRAPH_PATT_COLORRAM+0xdc,
174 NV04_PGRAPH_PATT_COLORRAM+0xe0,
175 NV04_PGRAPH_PATT_COLORRAM+0xe4,
176 NV04_PGRAPH_PATT_COLORRAM+0xe8,
177 NV04_PGRAPH_PATT_COLORRAM+0xec,
178 NV04_PGRAPH_PATT_COLORRAM+0xf0,
179 NV04_PGRAPH_PATT_COLORRAM+0xf4,
180 NV04_PGRAPH_PATT_COLORRAM+0xf8,
181 NV04_PGRAPH_PATT_COLORRAM+0xfc,
182 NV04_PGRAPH_PATTERN,
183 0x0040080c,
184 NV04_PGRAPH_PATTERN_SHAPE,
185 0x00400600,
186 NV04_PGRAPH_ROP3,
187 NV04_PGRAPH_CHROMA,
188 NV04_PGRAPH_BETA_AND,
189 NV04_PGRAPH_BETA_PREMULT,
190 NV04_PGRAPH_CONTROL0,
191 NV04_PGRAPH_CONTROL1,
192 NV04_PGRAPH_CONTROL2,
193 NV04_PGRAPH_BLEND,
194 NV04_PGRAPH_STORED_FMT,
195 NV04_PGRAPH_SOURCE_COLOR,
196 0x00400560,
197 0x00400568,
198 0x00400564,
199 0x0040056c,
200 0x00400400,
201 0x00400480,
202 0x00400404,
203 0x00400484,
204 0x00400408,
205 0x00400488,
206 0x0040040c,
207 0x0040048c,
208 0x00400410,
209 0x00400490,
210 0x00400414,
211 0x00400494,
212 0x00400418,
213 0x00400498,
214 0x0040041c,
215 0x0040049c,
216 0x00400420,
217 0x004004a0,
218 0x00400424,
219 0x004004a4,
220 0x00400428,
221 0x004004a8,
222 0x0040042c,
223 0x004004ac,
224 0x00400430,
225 0x004004b0,
226 0x00400434,
227 0x004004b4,
228 0x00400438,
229 0x004004b8,
230 0x0040043c,
231 0x004004bc,
232 0x00400440,
233 0x004004c0,
234 0x00400444,
235 0x004004c4,
236 0x00400448,
237 0x004004c8,
238 0x0040044c,
239 0x004004cc,
240 0x00400450,
241 0x004004d0,
242 0x00400454,
243 0x004004d4,
244 0x00400458,
245 0x004004d8,
246 0x0040045c,
247 0x004004dc,
248 0x00400460,
249 0x004004e0,
250 0x00400464,
251 0x004004e4,
252 0x00400468,
253 0x004004e8,
254 0x0040046c,
255 0x004004ec,
256 0x00400470,
257 0x004004f0,
258 0x00400474,
259 0x004004f4,
260 0x00400478,
261 0x004004f8,
262 0x0040047c,
263 0x004004fc,
264 0x00400534,
265 0x00400538,
266 0x00400514,
267 0x00400518,
268 0x0040051c,
269 0x00400520,
270 0x00400524,
271 0x00400528,
272 0x0040052c,
273 0x00400530,
274 0x00400d00,
275 0x00400d40,
276 0x00400d80,
277 0x00400d04,
278 0x00400d44,
279 0x00400d84,
280 0x00400d08,
281 0x00400d48,
282 0x00400d88,
283 0x00400d0c,
284 0x00400d4c,
285 0x00400d8c,
286 0x00400d10,
287 0x00400d50,
288 0x00400d90,
289 0x00400d14,
290 0x00400d54,
291 0x00400d94,
292 0x00400d18,
293 0x00400d58,
294 0x00400d98,
295 0x00400d1c,
296 0x00400d5c,
297 0x00400d9c,
298 0x00400d20,
299 0x00400d60,
300 0x00400da0,
301 0x00400d24,
302 0x00400d64,
303 0x00400da4,
304 0x00400d28,
305 0x00400d68,
306 0x00400da8,
307 0x00400d2c,
308 0x00400d6c,
309 0x00400dac,
310 0x00400d30,
311 0x00400d70,
312 0x00400db0,
313 0x00400d34,
314 0x00400d74,
315 0x00400db4,
316 0x00400d38,
317 0x00400d78,
318 0x00400db8,
319 0x00400d3c,
320 0x00400d7c,
321 0x00400dbc,
322 0x00400590,
323 0x00400594,
324 0x00400598,
325 0x0040059c,
326 0x004005a8,
327 0x004005ac,
328 0x004005b0,
329 0x004005b4,
330 0x004005c0,
331 0x004005c4,
332 0x004005c8,
333 0x004005cc,
334 0x004005d0,
335 0x004005d4,
336 0x004005d8,
337 0x004005dc,
338 0x004005e0,
339 NV04_PGRAPH_PASSTHRU_0,
340 NV04_PGRAPH_PASSTHRU_1,
341 NV04_PGRAPH_PASSTHRU_2,
342 NV04_PGRAPH_DVD_COLORFMT,
343 NV04_PGRAPH_SCALED_FORMAT,
344 NV04_PGRAPH_MISC24_0,
345 NV04_PGRAPH_MISC24_1,
346 NV04_PGRAPH_MISC24_2,
347 0x00400500,
348 0x00400504,
349 NV04_PGRAPH_VALID1,
350 NV04_PGRAPH_VALID2,
351 NV04_PGRAPH_DEBUG_3
352};
353
354struct nv04_graph_priv {
355 struct nouveau_graph base;
356 struct nv04_graph_chan *chan[16];
357 spinlock_t lock;
358};
359
360struct nv04_graph_chan {
361 struct nouveau_object base;
362 int chid;
363 u32 nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
364};
365
366
367static inline struct nv04_graph_priv *
368nv04_graph_priv(struct nv04_graph_chan *chan)
369{
370 return (void *)nv_object(chan)->engine;
371}
372
373/*******************************************************************************
374 * Graphics object classes
375 ******************************************************************************/
376
377/*
378 * Software methods, why they are needed, and how they all work:
379 *
380 * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
381 * 2d engine settings are kept inside the grobjs themselves. The grobjs are
382 * 3 words long on both. grobj format on NV04 is:
383 *
384 * word 0:
385 * - bits 0-7: class
386 * - bit 12: color key active
387 * - bit 13: clip rect active
388 * - bit 14: if set, destination surface is swizzled and taken from buffer 5
389 * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
390 * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
391 * NV03_CONTEXT_SURFACE_DST].
392 * - bits 15-17: 2d operation [aka patch config]
393 * - bit 24: patch valid [enables rendering using this object]
394 * - bit 25: surf3d valid [for tex_tri and multitex_tri only]
395 * word 1:
396 * - bits 0-1: mono format
397 * - bits 8-13: color format
398 * - bits 16-31: DMA_NOTIFY instance
399 * word 2:
400 * - bits 0-15: DMA_A instance
401 * - bits 16-31: DMA_B instance
402 *
403 * On NV05 it's:
404 *
405 * word 0:
406 * - bits 0-7: class
407 * - bit 12: color key active
408 * - bit 13: clip rect active
409 * - bit 14: if set, destination surface is swizzled and taken from buffer 5
410 * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
411 * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
412 * NV03_CONTEXT_SURFACE_DST].
413 * - bits 15-17: 2d operation [aka patch config]
414 * - bits 20-22: dither mode
415 * - bit 24: patch valid [enables rendering using this object]
416 * - bit 25: surface_dst/surface_color/surf2d/surf3d valid
417 * - bit 26: surface_src/surface_zeta valid
418 * - bit 27: pattern valid
419 * - bit 28: rop valid
420 * - bit 29: beta1 valid
421 * - bit 30: beta4 valid
422 * word 1:
423 * - bits 0-1: mono format
424 * - bits 8-13: color format
425 * - bits 16-31: DMA_NOTIFY instance
426 * word 2:
427 * - bits 0-15: DMA_A instance
428 * - bits 16-31: DMA_B instance
429 *
430 * NV05 will set/unset the relevant valid bits when you poke the relevant
431 * object-binding methods with an object of the proper type, or with the
432 * NULL type. It'll only allow rendering using the grobj if all needed
433 * objects are bound. The needed set of objects depends on the selected
434 * operation: e.g. the rop object is needed by ROP_AND, but not SRCCOPY_AND.
435 *
436 * NV04 doesn't have these methods implemented at all, and doesn't have the
437 * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
438 * is set. So we have to emulate them in software, internally keeping the
439 * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04
440 * and the last word isn't actually used for anything, we abuse it for
441 * this purpose.
442 *
443 * Actually, NV05 can optionally check bit 24 too, but we disable this since
444 * there's no use for it.
445 *
446 * For unknown reasons, NV04 implements surf3d binding in hardware as an
447 * exception. Also for unknown reasons, NV04 doesn't implement the clipping
448 * methods on the surf3d object, so we have to emulate them too.
449 */
450
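As a reading aid, the valid bits tracked in the shadow word at grobj offset 0x0c map onto the NV05 layout above as follows (the mask names are hypothetical, but the raw values are exactly the masks passed to nv04_graph_set_ctx_val below):

	#define GROBJ_VALID_SURF	0x02000000 /* bit 25: surf_dst/color/2d/3d */
	#define GROBJ_VALID_SRC_ZETA	0x04000000 /* bit 26: surf_src/zeta */
	#define GROBJ_VALID_PATTERN	0x08000000 /* bit 27 */
	#define GROBJ_VALID_ROP		0x10000000 /* bit 28 */
	#define GROBJ_VALID_BETA1	0x20000000 /* bit 29 */
	#define GROBJ_VALID_BETA4	0x40000000 /* bit 30 */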
451static void
452nv04_graph_set_ctx1(struct nouveau_object *object, u32 mask, u32 value)
453{
454 struct nv04_graph_priv *priv = (void *)object->engine;
455 int subc = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
456 u32 tmp;
457
458 tmp = nv_ro32(object, 0x00);
459 tmp &= ~mask;
460 tmp |= value;
461 nv_wo32(object, 0x00, tmp);
462
463 nv_wr32(priv, NV04_PGRAPH_CTX_SWITCH1, tmp);
464 nv_wr32(priv, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
465}
466
467static void
468nv04_graph_set_ctx_val(struct nouveau_object *object, u32 mask, u32 value)
469{
470 int class, op, valid = 1;
471 u32 tmp, ctx1;
472
473 ctx1 = nv_ro32(object, 0x00);
474 class = ctx1 & 0xff;
475 op = (ctx1 >> 15) & 7;
476
477 tmp = nv_ro32(object, 0x0c);
478 tmp &= ~mask;
479 tmp |= value;
480 nv_wo32(object, 0x0c, tmp);
481
482 /* check for valid surf2d/surf_dst/surf_color */
483 if (!(tmp & 0x02000000))
484 valid = 0;
485 /* check for valid surf_src/surf_zeta */
486 if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
487 valid = 0;
488
489 switch (op) {
490 /* SRCCOPY_AND, SRCCOPY: no extra objects required */
491 case 0:
492 case 3:
493 break;
494 /* ROP_AND: requires pattern and rop */
495 case 1:
496 if (!(tmp & 0x18000000))
497 valid = 0;
498 break;
499 /* BLEND_AND: requires beta1 */
500 case 2:
501 if (!(tmp & 0x20000000))
502 valid = 0;
503 break;
504 /* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
505 case 4:
506 case 5:
507 if (!(tmp & 0x40000000))
508 valid = 0;
509 break;
510 }
511
512 nv04_graph_set_ctx1(object, 0x01000000, valid << 24);
513}
514
515static int
516nv04_graph_mthd_set_operation(struct nouveau_object *object, u32 mthd,
517 void *args, u32 size)
518{
519 u32 class = nv_ro32(object, 0) & 0xff;
520 u32 data = *(u32 *)args;
521 if (data > 5)
522 return 1;
523 /* Old versions of the objects only accept the first three operations. */
524 if (data > 2 && class < 0x40)
525 return 1;
526 nv04_graph_set_ctx1(object, 0x00038000, data << 15);
527 /* changing operation changes set of objects needed for validation */
528 nv04_graph_set_ctx_val(object, 0, 0);
529 return 0;
530}
531
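The operation codes accepted here, named after the comments in nv04_graph_set_ctx_val above (an illustrative enum, not a driver definition):

	enum {
		SRCCOPY_AND	= 0,	/* no extra objects required */
		ROP_AND		= 1,	/* requires pattern and rop */
		BLEND_AND	= 2,	/* requires beta1 */
		SRCCOPY		= 3,	/* no extra objects required */
		SRCCOPY_PREMULT	= 4,	/* requires beta4 */
		BLEND_PREMULT	= 5,	/* requires beta4 */
	};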
532static int
533nv04_graph_mthd_surf3d_clip_h(struct nouveau_object *object, u32 mthd,
534 void *args, u32 size)
535{
536 struct nv04_graph_priv *priv = (void *)object->engine;
537 u32 data = *(u32 *)args;
538 u32 min = data & 0xffff, max;
539 u32 w = data >> 16;
540 if (min & 0x8000)
541 /* too large */
542 return 1;
543 if (w & 0x8000)
544 /* yes, it accepts negative widths for some reason. */
545 w |= 0xffff0000;
546 max = min + w;
547 max &= 0x3ffff;
548 nv_wr32(priv, 0x40053c, min);
549 nv_wr32(priv, 0x400544, max);
550 return 0;
551}
552
553static int
554nv04_graph_mthd_surf3d_clip_v(struct nouveau_object *object, u32 mthd,
555 void *args, u32 size)
556{
557 struct nv04_graph_priv *priv = (void *)object->engine;
558 u32 data = *(u32 *)args;
559 u32 min = data & 0xffff, max;
560 u32 w = data >> 16;
561 if (min & 0x8000)
562 /* too large */
563 return 1;
564 if (w & 0x8000)
565 /* yes, it accepts negative widths for some reason. */
566 w |= 0xffff0000;
567 max = min + w;
568 max &= 0x3ffff;
569 nv_wr32(priv, 0x400540, min);
570 nv_wr32(priv, 0x400548, max);
571 return 0;
572}
573
574static u16
575nv04_graph_mthd_bind_class(struct nouveau_object *object, u32 *args, u32 size)
576{
577 struct nouveau_instmem *imem = nouveau_instmem(object);
578 u32 inst = *(u32 *)args << 4;
579 return nv_ro32(imem, inst);
580}
581
582static int
583nv04_graph_mthd_bind_surf2d(struct nouveau_object *object, u32 mthd,
584 void *args, u32 size)
585{
586 switch (nv04_graph_mthd_bind_class(object, args, size)) {
587 case 0x30:
588 nv04_graph_set_ctx1(object, 0x00004000, 0);
589 nv04_graph_set_ctx_val(object, 0x02000000, 0);
590 return 0;
591 case 0x42:
592 nv04_graph_set_ctx1(object, 0x00004000, 0);
593 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
594 return 0;
595 }
596 return 1;
597}
598
599static int
600nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_object *object, u32 mthd,
601 void *args, u32 size)
602{
603 switch (nv04_graph_mthd_bind_class(object, args, size)) {
604 case 0x30:
605 nv04_graph_set_ctx1(object, 0x00004000, 0);
606 nv04_graph_set_ctx_val(object, 0x02000000, 0);
607 return 0;
608 case 0x42:
609 nv04_graph_set_ctx1(object, 0x00004000, 0);
610 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
611 return 0;
612 case 0x52:
613 nv04_graph_set_ctx1(object, 0x00004000, 0x00004000);
614 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
615 return 0;
616 }
617 return 1;
618}
619
620static int
621nv01_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd,
622 void *args, u32 size)
623{
624 switch (nv04_graph_mthd_bind_class(object, args, size)) {
625 case 0x30:
626 nv04_graph_set_ctx_val(object, 0x08000000, 0);
627 return 0;
628 case 0x18:
629 nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000);
630 return 0;
631 }
632 return 1;
633}
634
635static int
636nv04_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd,
637 void *args, u32 size)
638{
639 switch (nv04_graph_mthd_bind_class(object, args, size)) {
640 case 0x30:
641 nv04_graph_set_ctx_val(object, 0x08000000, 0);
642 return 0;
643 case 0x44:
644 nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000);
645 return 0;
646 }
647 return 1;
648}
649
650static int
651nv04_graph_mthd_bind_rop(struct nouveau_object *object, u32 mthd,
652 void *args, u32 size)
653{
654 switch (nv04_graph_mthd_bind_class(object, args, size)) {
655 case 0x30:
656 nv04_graph_set_ctx_val(object, 0x10000000, 0);
657 return 0;
658 case 0x43:
659 nv04_graph_set_ctx_val(object, 0x10000000, 0x10000000);
660 return 0;
661 }
662 return 1;
663}
664
665static int
666nv04_graph_mthd_bind_beta1(struct nouveau_object *object, u32 mthd,
667 void *args, u32 size)
668{
669 switch (nv04_graph_mthd_bind_class(object, args, size)) {
670 case 0x30:
671 nv04_graph_set_ctx_val(object, 0x20000000, 0);
672 return 0;
673 case 0x12:
674 nv04_graph_set_ctx_val(object, 0x20000000, 0x20000000);
675 return 0;
676 }
677 return 1;
678}
679
680static int
681nv04_graph_mthd_bind_beta4(struct nouveau_object *object, u32 mthd,
682 void *args, u32 size)
683{
684 switch (nv04_graph_mthd_bind_class(object, args, size)) {
685 case 0x30:
686 nv04_graph_set_ctx_val(object, 0x40000000, 0);
687 return 0;
688 case 0x72:
689 nv04_graph_set_ctx_val(object, 0x40000000, 0x40000000);
690 return 0;
691 }
692 return 1;
693}
694
695static int
696nv04_graph_mthd_bind_surf_dst(struct nouveau_object *object, u32 mthd,
697 void *args, u32 size)
698{
699 switch (nv04_graph_mthd_bind_class(object, args, size)) {
700 case 0x30:
701 nv04_graph_set_ctx_val(object, 0x02000000, 0);
702 return 0;
703 case 0x58:
704 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
705 return 0;
706 }
707 return 1;
708}
709
710static int
711nv04_graph_mthd_bind_surf_src(struct nouveau_object *object, u32 mthd,
712 void *args, u32 size)
713{
714 switch (nv04_graph_mthd_bind_class(object, args, size)) {
715 case 0x30:
716 nv04_graph_set_ctx_val(object, 0x04000000, 0);
717 return 0;
718 case 0x59:
719 nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000);
720 return 0;
721 }
722 return 1;
723}
724
725static int
726nv04_graph_mthd_bind_surf_color(struct nouveau_object *object, u32 mthd,
727 void *args, u32 size)
728{
729 switch (nv04_graph_mthd_bind_class(object, args, size)) {
730 case 0x30:
731 nv04_graph_set_ctx_val(object, 0x02000000, 0);
732 return 0;
733 case 0x5a:
734 nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
735 return 0;
736 }
737 return 1;
738}
739
740static int
741nv04_graph_mthd_bind_surf_zeta(struct nouveau_object *object, u32 mthd,
742 void *args, u32 size)
743{
744 switch (nv04_graph_mthd_bind_class(object, args, size)) {
745 case 0x30:
746 nv04_graph_set_ctx_val(object, 0x04000000, 0);
747 return 0;
748 case 0x5b:
749 nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000);
750 return 0;
751 }
752 return 1;
753}
754
755static int
756nv01_graph_mthd_bind_clip(struct nouveau_object *object, u32 mthd,
757 void *args, u32 size)
758{
759 switch (nv04_graph_mthd_bind_class(object, args, size)) {
760 case 0x30:
761 nv04_graph_set_ctx1(object, 0x2000, 0);
762 return 0;
763 case 0x19:
764 nv04_graph_set_ctx1(object, 0x2000, 0x2000);
765 return 0;
766 }
767 return 1;
768}
769
770static int
771nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
772 void *args, u32 size)
773{
774 switch (nv04_graph_mthd_bind_class(object, args, size)) {
775 case 0x30:
776 nv04_graph_set_ctx1(object, 0x1000, 0);
777 return 0;
778 /* Yes, for some reason even the old versions of objects
779 * accept 0x57 and not 0x17. Consistency be damned.
780 */
781 case 0x57:
782 nv04_graph_set_ctx1(object, 0x1000, 0x1000);
783 return 0;
784 }
785 return 1;
786}
787
788static struct nouveau_omthds
789nv03_graph_gdi_omthds[] = {
790 { 0x0184, nv01_graph_mthd_bind_patt },
791 { 0x0188, nv04_graph_mthd_bind_rop },
792 { 0x018c, nv04_graph_mthd_bind_beta1 },
793 { 0x0190, nv04_graph_mthd_bind_surf_dst },
794 { 0x02fc, nv04_graph_mthd_set_operation },
795 {}
796};
797
798static struct nouveau_omthds
799nv04_graph_gdi_omthds[] = {
800 { 0x0188, nv04_graph_mthd_bind_patt },
801 { 0x018c, nv04_graph_mthd_bind_rop },
802 { 0x0190, nv04_graph_mthd_bind_beta1 },
803 { 0x0194, nv04_graph_mthd_bind_beta4 },
804 { 0x0198, nv04_graph_mthd_bind_surf2d },
805 { 0x02fc, nv04_graph_mthd_set_operation },
806 {}
807};
808
809static struct nouveau_omthds
810nv01_graph_blit_omthds[] = {
811 { 0x0184, nv01_graph_mthd_bind_chroma },
812 { 0x0188, nv01_graph_mthd_bind_clip },
813 { 0x018c, nv01_graph_mthd_bind_patt },
814 { 0x0190, nv04_graph_mthd_bind_rop },
815 { 0x0194, nv04_graph_mthd_bind_beta1 },
816 { 0x0198, nv04_graph_mthd_bind_surf_dst },
817 { 0x019c, nv04_graph_mthd_bind_surf_src },
818 { 0x02fc, nv04_graph_mthd_set_operation },
819 {}
820};
821
822static struct nouveau_omthds
823nv04_graph_blit_omthds[] = {
824 { 0x0184, nv01_graph_mthd_bind_chroma },
825 { 0x0188, nv01_graph_mthd_bind_clip },
826 { 0x018c, nv04_graph_mthd_bind_patt },
827 { 0x0190, nv04_graph_mthd_bind_rop },
828 { 0x0194, nv04_graph_mthd_bind_beta1 },
829 { 0x0198, nv04_graph_mthd_bind_beta4 },
830 { 0x019c, nv04_graph_mthd_bind_surf2d },
831 { 0x02fc, nv04_graph_mthd_set_operation },
832 {}
833};
834
835static struct nouveau_omthds
836nv04_graph_iifc_omthds[] = {
837 { 0x0188, nv01_graph_mthd_bind_chroma },
838 { 0x018c, nv01_graph_mthd_bind_clip },
839 { 0x0190, nv04_graph_mthd_bind_patt },
840 { 0x0194, nv04_graph_mthd_bind_rop },
841 { 0x0198, nv04_graph_mthd_bind_beta1 },
842 { 0x019c, nv04_graph_mthd_bind_beta4 },
843 { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
844 { 0x03e4, nv04_graph_mthd_set_operation },
845 {}
846};
847
848static struct nouveau_omthds
849nv01_graph_ifc_omthds[] = {
850 { 0x0184, nv01_graph_mthd_bind_chroma },
851 { 0x0188, nv01_graph_mthd_bind_clip },
852 { 0x018c, nv01_graph_mthd_bind_patt },
853 { 0x0190, nv04_graph_mthd_bind_rop },
854 { 0x0194, nv04_graph_mthd_bind_beta1 },
855 { 0x0198, nv04_graph_mthd_bind_surf_dst },
856 { 0x02fc, nv04_graph_mthd_set_operation },
857 {}
858};
859
860static struct nouveau_omthds
861nv04_graph_ifc_omthds[] = {
862 { 0x0184, nv01_graph_mthd_bind_chroma },
863 { 0x0188, nv01_graph_mthd_bind_clip },
864 { 0x018c, nv04_graph_mthd_bind_patt },
865 { 0x0190, nv04_graph_mthd_bind_rop },
866 { 0x0194, nv04_graph_mthd_bind_beta1 },
867 { 0x0198, nv04_graph_mthd_bind_beta4 },
868 { 0x019c, nv04_graph_mthd_bind_surf2d },
869 { 0x02fc, nv04_graph_mthd_set_operation },
870 {}
871};
872
873static struct nouveau_omthds
874nv03_graph_sifc_omthds[] = {
875 { 0x0184, nv01_graph_mthd_bind_chroma },
876 { 0x0188, nv01_graph_mthd_bind_patt },
877 { 0x018c, nv04_graph_mthd_bind_rop },
878 { 0x0190, nv04_graph_mthd_bind_beta1 },
879 { 0x0194, nv04_graph_mthd_bind_surf_dst },
880 { 0x02fc, nv04_graph_mthd_set_operation },
881 {}
882};
883
884static struct nouveau_omthds
885nv04_graph_sifc_omthds[] = {
886 { 0x0184, nv01_graph_mthd_bind_chroma },
887 { 0x0188, nv04_graph_mthd_bind_patt },
888 { 0x018c, nv04_graph_mthd_bind_rop },
889 { 0x0190, nv04_graph_mthd_bind_beta1 },
890 { 0x0194, nv04_graph_mthd_bind_beta4 },
891 { 0x0198, nv04_graph_mthd_bind_surf2d },
892 { 0x02fc, nv04_graph_mthd_set_operation },
893 {}
894};
895
896static struct nouveau_omthds
897nv03_graph_sifm_omthds[] = {
898 { 0x0188, nv01_graph_mthd_bind_patt },
899 { 0x018c, nv04_graph_mthd_bind_rop },
900 { 0x0190, nv04_graph_mthd_bind_beta1 },
901 { 0x0194, nv04_graph_mthd_bind_surf_dst },
902 { 0x0304, nv04_graph_mthd_set_operation },
903 {}
904};
905
906static struct nouveau_omthds
907nv04_graph_sifm_omthds[] = {
908 { 0x0188, nv04_graph_mthd_bind_patt },
909 { 0x018c, nv04_graph_mthd_bind_rop },
910 { 0x0190, nv04_graph_mthd_bind_beta1 },
911 { 0x0194, nv04_graph_mthd_bind_beta4 },
912 { 0x0198, nv04_graph_mthd_bind_surf2d },
913 { 0x0304, nv04_graph_mthd_set_operation },
914 {}
915};
916
917static struct nouveau_omthds
918nv04_graph_surf3d_omthds[] = {
919 { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
920 { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
921 {}
922};
923
924static struct nouveau_omthds
925nv03_graph_ttri_omthds[] = {
926 { 0x0188, nv01_graph_mthd_bind_clip },
927 { 0x018c, nv04_graph_mthd_bind_surf_color },
928 { 0x0190, nv04_graph_mthd_bind_surf_zeta },
929 {}
930};
931
932static struct nouveau_omthds
933nv01_graph_prim_omthds[] = {
934 { 0x0184, nv01_graph_mthd_bind_clip },
935 { 0x0188, nv01_graph_mthd_bind_patt },
936 { 0x018c, nv04_graph_mthd_bind_rop },
937 { 0x0190, nv04_graph_mthd_bind_beta1 },
938 { 0x0194, nv04_graph_mthd_bind_surf_dst },
939 { 0x02fc, nv04_graph_mthd_set_operation },
940 {}
941};
942
943static struct nouveau_omthds
944nv04_graph_prim_omthds[] = {
945 { 0x0184, nv01_graph_mthd_bind_clip },
946 { 0x0188, nv04_graph_mthd_bind_patt },
947 { 0x018c, nv04_graph_mthd_bind_rop },
948 { 0x0190, nv04_graph_mthd_bind_beta1 },
949 { 0x0194, nv04_graph_mthd_bind_beta4 },
950 { 0x0198, nv04_graph_mthd_bind_surf2d },
951 { 0x02fc, nv04_graph_mthd_set_operation },
952 {}
953};
954
955static int
956nv04_graph_object_ctor(struct nouveau_object *parent,
957 struct nouveau_object *engine,
958 struct nouveau_oclass *oclass, void *data, u32 size,
959 struct nouveau_object **pobject)
960{
961 struct nouveau_gpuobj *obj;
962 int ret;
963
964 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
965 16, 16, 0, &obj);
966 *pobject = nv_object(obj);
967 if (ret)
968 return ret;
969
970 nv_wo32(obj, 0x00, nv_mclass(obj));
971#ifdef __BIG_ENDIAN
972 nv_mo32(obj, 0x00, 0x00080000, 0x00080000);
973#endif
974 nv_wo32(obj, 0x04, 0x00000000);
975 nv_wo32(obj, 0x08, 0x00000000);
976 nv_wo32(obj, 0x0c, 0x00000000);
977 return 0;
978}
979
980struct nouveau_ofuncs
981nv04_graph_ofuncs = {
982 .ctor = nv04_graph_object_ctor,
983 .dtor = _nouveau_gpuobj_dtor,
984 .init = _nouveau_gpuobj_init,
985 .fini = _nouveau_gpuobj_fini,
986 .rd32 = _nouveau_gpuobj_rd32,
987 .wr32 = _nouveau_gpuobj_wr32,
988};
989
990static struct nouveau_oclass
991nv04_graph_sclass[] = {
992 { 0x0012, &nv04_graph_ofuncs }, /* beta1 */
993 { 0x0017, &nv04_graph_ofuncs }, /* chroma */
994 { 0x0018, &nv04_graph_ofuncs }, /* pattern (nv01) */
995 { 0x0019, &nv04_graph_ofuncs }, /* clip */
996 { 0x001c, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* line */
997 { 0x001d, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* tri */
998 { 0x001e, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* rect */
999 { 0x001f, &nv04_graph_ofuncs, nv01_graph_blit_omthds },
1000 { 0x0021, &nv04_graph_ofuncs, nv01_graph_ifc_omthds },
1001 { 0x0030, &nv04_graph_ofuncs }, /* null */
1002 { 0x0036, &nv04_graph_ofuncs, nv03_graph_sifc_omthds },
1003 { 0x0037, &nv04_graph_ofuncs, nv03_graph_sifm_omthds },
1004 { 0x0038, &nv04_graph_ofuncs }, /* dvd subpicture */
1005 { 0x0039, &nv04_graph_ofuncs }, /* m2mf */
1006 { 0x0042, &nv04_graph_ofuncs }, /* surf2d */
1007 { 0x0043, &nv04_graph_ofuncs }, /* rop */
1008 { 0x0044, &nv04_graph_ofuncs }, /* pattern */
1009 { 0x0048, &nv04_graph_ofuncs, nv03_graph_ttri_omthds },
1010 { 0x004a, &nv04_graph_ofuncs, nv04_graph_gdi_omthds },
1011 { 0x004b, &nv04_graph_ofuncs, nv03_graph_gdi_omthds },
1012 { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
1013 { 0x0053, &nv04_graph_ofuncs, nv04_graph_surf3d_omthds },
1014 { 0x0054, &nv04_graph_ofuncs }, /* ttri */
1015 { 0x0055, &nv04_graph_ofuncs }, /* mtri */
1016 { 0x0057, &nv04_graph_ofuncs }, /* chroma */
1017 { 0x0058, &nv04_graph_ofuncs }, /* surf_dst */
1018 { 0x0059, &nv04_graph_ofuncs }, /* surf_src */
1019 { 0x005a, &nv04_graph_ofuncs }, /* surf_color */
1020 { 0x005b, &nv04_graph_ofuncs }, /* surf_zeta */
1021 { 0x005c, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* line */
1022 { 0x005d, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* tri */
1023 { 0x005e, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* rect */
1024 { 0x005f, &nv04_graph_ofuncs, nv04_graph_blit_omthds },
1025 { 0x0060, &nv04_graph_ofuncs, nv04_graph_iifc_omthds },
1026 { 0x0061, &nv04_graph_ofuncs, nv04_graph_ifc_omthds },
1027 { 0x0064, &nv04_graph_ofuncs }, /* iifc (nv05) */
1028 { 0x0065, &nv04_graph_ofuncs }, /* ifc (nv05) */
1029 { 0x0066, &nv04_graph_ofuncs }, /* sifc (nv05) */
1030 { 0x0072, &nv04_graph_ofuncs }, /* beta4 */
1031 { 0x0076, &nv04_graph_ofuncs, nv04_graph_sifc_omthds },
1032 { 0x0077, &nv04_graph_ofuncs, nv04_graph_sifm_omthds },
1033 {},
1034};
1035
1036/*******************************************************************************
1037 * PGRAPH context
1038 ******************************************************************************/
1039
1040static struct nv04_graph_chan *
1041nv04_graph_channel(struct nv04_graph_priv *priv)
1042{
1043 struct nv04_graph_chan *chan = NULL;
1044 if (nv_rd32(priv, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
1045 int chid = nv_rd32(priv, NV04_PGRAPH_CTX_USER) >> 24;
1046 if (chid < ARRAY_SIZE(priv->chan))
1047 chan = priv->chan[chid];
1048 }
1049 return chan;
1050}
1051
1052static int
1053nv04_graph_load_context(struct nv04_graph_chan *chan, int chid)
1054{
1055 struct nv04_graph_priv *priv = nv04_graph_priv(chan);
1056 int i;
1057
1058 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
1059 nv_wr32(priv, nv04_graph_ctx_regs[i], chan->nv04[i]);
1060
1061 nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
1062 nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
1063 nv_mask(priv, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
1064 return 0;
1065}
1066
1067static int
1068nv04_graph_unload_context(struct nv04_graph_chan *chan)
1069{
1070 struct nv04_graph_priv *priv = nv04_graph_priv(chan);
1071 int i;
1072
1073 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
1074 chan->nv04[i] = nv_rd32(priv, nv04_graph_ctx_regs[i]);
1075
1076 nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
1077 nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
1078 return 0;
1079}
1080
1081static void
1082nv04_graph_context_switch(struct nv04_graph_priv *priv)
1083{
1084 struct nv04_graph_chan *prev = NULL;
1085 struct nv04_graph_chan *next = NULL;
1086 unsigned long flags;
1087 int chid;
1088
1089 spin_lock_irqsave(&priv->lock, flags);
1090 nv04_graph_idle(priv);
1091
1092 /* If previous context is valid, we need to save it */
1093 prev = nv04_graph_channel(priv);
1094 if (prev)
1095 nv04_graph_unload_context(prev);
1096
1097 /* load context for next channel */
1098 chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
1099 next = priv->chan[chid];
1100 if (next)
1101 nv04_graph_load_context(next, chid);
1102
1103 spin_unlock_irqrestore(&priv->lock, flags);
1104}
1105
1106static u32 *ctx_reg(struct nv04_graph_chan *chan, u32 reg)
1107{
1108 int i;
1109
1110 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
1111 if (nv04_graph_ctx_regs[i] == reg)
1112 return &chan->nv04[i];
1113 }
1114
1115 return NULL;
1116}
1117
1118static int
1119nv04_graph_context_ctor(struct nouveau_object *parent,
1120 struct nouveau_object *engine,
1121 struct nouveau_oclass *oclass, void *data, u32 size,
1122 struct nouveau_object **pobject)
1123{
1124 struct nouveau_fifo_chan *fifo = (void *)parent;
1125 struct nv04_graph_priv *priv = (void *)engine;
1126 struct nv04_graph_chan *chan;
1127 unsigned long flags;
1128 int ret;
1129
1130 ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
1131 *pobject = nv_object(chan);
1132 if (ret)
1133 return ret;
1134
1135 spin_lock_irqsave(&priv->lock, flags);
1136 if (priv->chan[fifo->chid]) {
1137 *pobject = nv_object(priv->chan[fifo->chid]);
1138 atomic_inc(&(*pobject)->refcount);
1139 spin_unlock_irqrestore(&priv->lock, flags);
1140 nouveau_object_destroy(&chan->base);
1141 return 1;
1142 }
1143
1144 *ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
1145
1146 priv->chan[fifo->chid] = chan;
1147 chan->chid = fifo->chid;
1148 spin_unlock_irqrestore(&priv->lock, flags);
1149 return 0;
1150}
1151
1152static void
1153nv04_graph_context_dtor(struct nouveau_object *object)
1154{
1155 struct nv04_graph_priv *priv = (void *)object->engine;
1156 struct nv04_graph_chan *chan = (void *)object;
1157 unsigned long flags;
1158
1159 spin_lock_irqsave(&priv->lock, flags);
1160 priv->chan[chan->chid] = NULL;
1161 spin_unlock_irqrestore(&priv->lock, flags);
1162
1163 nouveau_object_destroy(&chan->base);
1164}
1165
1166static int
1167nv04_graph_context_fini(struct nouveau_object *object, bool suspend)
1168{
1169 struct nv04_graph_priv *priv = (void *)object->engine;
1170 struct nv04_graph_chan *chan = (void *)object;
1171 unsigned long flags;
1172
1173 spin_lock_irqsave(&priv->lock, flags);
1174 nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
1175 if (nv04_graph_channel(priv) == chan)
1176 nv04_graph_unload_context(chan);
1177 nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
1178 spin_unlock_irqrestore(&priv->lock, flags);
1179
1180 return nouveau_object_fini(&chan->base, suspend);
1181}
1182
1183static struct nouveau_oclass
1184nv04_graph_cclass = {
1185 .handle = NV_ENGCTX(GR, 0x04),
1186 .ofuncs = &(struct nouveau_ofuncs) {
1187 .ctor = nv04_graph_context_ctor,
1188 .dtor = nv04_graph_context_dtor,
1189 .init = nouveau_object_init,
1190 .fini = nv04_graph_context_fini,
1191 },
1192};
1193
1194/*******************************************************************************
1195 * PGRAPH engine/subdev functions
1196 ******************************************************************************/
1197
1198bool
1199nv04_graph_idle(void *obj)
1200{
1201 struct nouveau_graph *graph = nouveau_graph(obj);
1202 u32 mask = 0xffffffff;
1203
1204 if (nv_device(obj)->card_type == NV_40)
1205 mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
1206
1207 if (!nv_wait(graph, NV04_PGRAPH_STATUS, mask, 0)) {
1208 nv_error(graph, "idle timed out with status 0x%08x\n",
1209 nv_rd32(graph, NV04_PGRAPH_STATUS));
1210 return false;
1211 }
1212
1213 return true;
1214}
1215
1216static const struct nouveau_bitfield
1217nv04_graph_intr_name[] = {
1218 { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
1219 {}
1220};
1221
1222static const struct nouveau_bitfield
1223nv04_graph_nstatus[] = {
1224 { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
1225 { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
1226 { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
1227 { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
1228 {}
1229};
1230
1231const struct nouveau_bitfield
1232nv04_graph_nsource[] = {
1233 { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
1234 { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
1235 { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
1236 { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
1237 { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
1238 { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
1239 { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
1240 { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
1241 { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
1242 { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
1243 { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
1244 { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
1245 { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
1246 { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
1247 { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
1248 { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
1249 { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
1250 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
1251 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
1252 {}
1253};
1254
1255static void
1256nv04_graph_intr(struct nouveau_subdev *subdev)
1257{
1258 struct nv04_graph_priv *priv = (void *)subdev;
1259 struct nv04_graph_chan *chan = NULL;
1260 struct nouveau_namedb *namedb = NULL;
1261 struct nouveau_handle *handle = NULL;
1262 u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
1263 u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
1264 u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
1265 u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
1266 u32 chid = (addr & 0x0f000000) >> 24;
1267 u32 subc = (addr & 0x0000e000) >> 13;
1268 u32 mthd = (addr & 0x00001ffc);
1269 u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
1270 u32 class = nv_rd32(priv, 0x400180 + subc * 4) & 0xff;
1271 u32 inst = (nv_rd32(priv, 0x40016c) & 0xffff) << 4;
1272 u32 show = stat;
1273 unsigned long flags;
1274
1275 spin_lock_irqsave(&priv->lock, flags);
1276 chan = priv->chan[chid];
1277 if (chan)
1278 namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
1279 spin_unlock_irqrestore(&priv->lock, flags);
1280
1281 if (stat & NV_PGRAPH_INTR_NOTIFY) {
1282 if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
1283 handle = nouveau_namedb_get_vinst(namedb, inst);
1284 if (handle && !nv_call(handle->object, mthd, data))
1285 show &= ~NV_PGRAPH_INTR_NOTIFY;
1286 }
1287 }
1288
1289 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1290 nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1291 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1292 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1293 nv04_graph_context_switch(priv);
1294 }
1295
1296 nv_wr32(priv, NV03_PGRAPH_INTR, stat);
1297 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
1298
1299 if (show) {
1300 nv_error(priv, "");
1301 nouveau_bitfield_print(nv04_graph_intr_name, show);
1302 printk(" nsource:");
1303 nouveau_bitfield_print(nv04_graph_nsource, nsource);
1304 printk(" nstatus:");
1305 nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
1306 printk("\n");
1307 nv_error(priv, "ch %d/%d class 0x%04x "
1308 "mthd 0x%04x data 0x%08x\n",
1309 chid, subc, class, mthd, data);
1310 }
1311
1312 nouveau_namedb_put(handle);
1313}
1314
1315static int
1316nv04_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1317 struct nouveau_oclass *oclass, void *data, u32 size,
1318 struct nouveau_object **pobject)
1319{
1320 struct nv04_graph_priv *priv;
1321 int ret;
1322
1323 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
1324 *pobject = nv_object(priv);
1325 if (ret)
1326 return ret;
1327
1328 nv_subdev(priv)->unit = 0x00001000;
1329 nv_subdev(priv)->intr = nv04_graph_intr;
1330 nv_engine(priv)->cclass = &nv04_graph_cclass;
1331 nv_engine(priv)->sclass = nv04_graph_sclass;
1332 spin_lock_init(&priv->lock);
1333 return 0;
1334}
1335
1336static int
1337nv04_graph_init(struct nouveau_object *object)
1338{
1339 struct nouveau_engine *engine = nv_engine(object);
1340 struct nv04_graph_priv *priv = (void *)engine;
1341 int ret;
1342
1343 ret = nouveau_graph_init(&priv->base);
1344 if (ret)
1345 return ret;
1346
1347 /* Enable PGRAPH interrupts */
1348 nv_wr32(priv, NV03_PGRAPH_INTR, 0xFFFFFFFF);
1349 nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
1350
1351 nv_wr32(priv, NV04_PGRAPH_VALID1, 0);
1352 nv_wr32(priv, NV04_PGRAPH_VALID2, 0);
1353 /*nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x000001FF);
1354 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
1355 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x1231c000);
1356 /*1231C000 blob, 001 haiku*/
1357 /*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
1358 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x72111100);
1359 /*0x72111100 blob, 01 haiku*/
1360 /*nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
1361 nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
1362 /*haiku same*/
1363
1364 /*nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
1365 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
1366 /*haiku and blob 10d4*/
1367
1368 nv_wr32(priv, NV04_PGRAPH_STATE, 0xFFFFFFFF);
1369 nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000100);
1370 nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
1371
1372 /* These don't belong here, they're part of a per-channel context */
1373 nv_wr32(priv, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
1374 nv_wr32(priv, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
1375 return 0;
1376}
1377
1378struct nouveau_oclass
1379nv04_graph_oclass = {
1380 .handle = NV_ENGINE(GR, 0x04),
1381 .ofuncs = &(struct nouveau_ofuncs) {
1382 .ctor = nv04_graph_ctor,
1383 .dtor = _nouveau_graph_dtor,
1384 .init = nv04_graph_init,
1385 .fini = _nouveau_graph_fini,
1386 },
1387};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
new file mode 100644
index 000000000000..92521c89e77f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -0,0 +1,1314 @@
1/*
2 * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/handle.h>
28
29#include <subdev/fb.h>
30
31#include <engine/fifo.h>
32#include <engine/graph.h>
33
34#include "regs.h"
35
36struct pipe_state {
37 u32 pipe_0x0000[0x040/4];
38 u32 pipe_0x0040[0x010/4];
39 u32 pipe_0x0200[0x0c0/4];
40 u32 pipe_0x4400[0x080/4];
41 u32 pipe_0x6400[0x3b0/4];
42 u32 pipe_0x6800[0x2f0/4];
43 u32 pipe_0x6c00[0x030/4];
44 u32 pipe_0x7000[0x130/4];
45 u32 pipe_0x7400[0x0c0/4];
46 u32 pipe_0x7800[0x0c0/4];
47};
48
49static int nv10_graph_ctx_regs[] = {
50 NV10_PGRAPH_CTX_SWITCH(0),
51 NV10_PGRAPH_CTX_SWITCH(1),
52 NV10_PGRAPH_CTX_SWITCH(2),
53 NV10_PGRAPH_CTX_SWITCH(3),
54 NV10_PGRAPH_CTX_SWITCH(4),
55 NV10_PGRAPH_CTX_CACHE(0, 0),
56 NV10_PGRAPH_CTX_CACHE(0, 1),
57 NV10_PGRAPH_CTX_CACHE(0, 2),
58 NV10_PGRAPH_CTX_CACHE(0, 3),
59 NV10_PGRAPH_CTX_CACHE(0, 4),
60 NV10_PGRAPH_CTX_CACHE(1, 0),
61 NV10_PGRAPH_CTX_CACHE(1, 1),
62 NV10_PGRAPH_CTX_CACHE(1, 2),
63 NV10_PGRAPH_CTX_CACHE(1, 3),
64 NV10_PGRAPH_CTX_CACHE(1, 4),
65 NV10_PGRAPH_CTX_CACHE(2, 0),
66 NV10_PGRAPH_CTX_CACHE(2, 1),
67 NV10_PGRAPH_CTX_CACHE(2, 2),
68 NV10_PGRAPH_CTX_CACHE(2, 3),
69 NV10_PGRAPH_CTX_CACHE(2, 4),
70 NV10_PGRAPH_CTX_CACHE(3, 0),
71 NV10_PGRAPH_CTX_CACHE(3, 1),
72 NV10_PGRAPH_CTX_CACHE(3, 2),
73 NV10_PGRAPH_CTX_CACHE(3, 3),
74 NV10_PGRAPH_CTX_CACHE(3, 4),
75 NV10_PGRAPH_CTX_CACHE(4, 0),
76 NV10_PGRAPH_CTX_CACHE(4, 1),
77 NV10_PGRAPH_CTX_CACHE(4, 2),
78 NV10_PGRAPH_CTX_CACHE(4, 3),
79 NV10_PGRAPH_CTX_CACHE(4, 4),
80 NV10_PGRAPH_CTX_CACHE(5, 0),
81 NV10_PGRAPH_CTX_CACHE(5, 1),
82 NV10_PGRAPH_CTX_CACHE(5, 2),
83 NV10_PGRAPH_CTX_CACHE(5, 3),
84 NV10_PGRAPH_CTX_CACHE(5, 4),
85 NV10_PGRAPH_CTX_CACHE(6, 0),
86 NV10_PGRAPH_CTX_CACHE(6, 1),
87 NV10_PGRAPH_CTX_CACHE(6, 2),
88 NV10_PGRAPH_CTX_CACHE(6, 3),
89 NV10_PGRAPH_CTX_CACHE(6, 4),
90 NV10_PGRAPH_CTX_CACHE(7, 0),
91 NV10_PGRAPH_CTX_CACHE(7, 1),
92 NV10_PGRAPH_CTX_CACHE(7, 2),
93 NV10_PGRAPH_CTX_CACHE(7, 3),
94 NV10_PGRAPH_CTX_CACHE(7, 4),
95 NV10_PGRAPH_CTX_USER,
96 NV04_PGRAPH_DMA_START_0,
97 NV04_PGRAPH_DMA_START_1,
98 NV04_PGRAPH_DMA_LENGTH,
99 NV04_PGRAPH_DMA_MISC,
100 NV10_PGRAPH_DMA_PITCH,
101 NV04_PGRAPH_BOFFSET0,
102 NV04_PGRAPH_BBASE0,
103 NV04_PGRAPH_BLIMIT0,
104 NV04_PGRAPH_BOFFSET1,
105 NV04_PGRAPH_BBASE1,
106 NV04_PGRAPH_BLIMIT1,
107 NV04_PGRAPH_BOFFSET2,
108 NV04_PGRAPH_BBASE2,
109 NV04_PGRAPH_BLIMIT2,
110 NV04_PGRAPH_BOFFSET3,
111 NV04_PGRAPH_BBASE3,
112 NV04_PGRAPH_BLIMIT3,
113 NV04_PGRAPH_BOFFSET4,
114 NV04_PGRAPH_BBASE4,
115 NV04_PGRAPH_BLIMIT4,
116 NV04_PGRAPH_BOFFSET5,
117 NV04_PGRAPH_BBASE5,
118 NV04_PGRAPH_BLIMIT5,
119 NV04_PGRAPH_BPITCH0,
120 NV04_PGRAPH_BPITCH1,
121 NV04_PGRAPH_BPITCH2,
122 NV04_PGRAPH_BPITCH3,
123 NV04_PGRAPH_BPITCH4,
124 NV10_PGRAPH_SURFACE,
125 NV10_PGRAPH_STATE,
126 NV04_PGRAPH_BSWIZZLE2,
127 NV04_PGRAPH_BSWIZZLE5,
128 NV04_PGRAPH_BPIXEL,
129 NV10_PGRAPH_NOTIFY,
130 NV04_PGRAPH_PATT_COLOR0,
131 NV04_PGRAPH_PATT_COLOR1,
132 NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
133 0x00400904,
134 0x00400908,
135 0x0040090c,
136 0x00400910,
137 0x00400914,
138 0x00400918,
139 0x0040091c,
140 0x00400920,
141 0x00400924,
142 0x00400928,
143 0x0040092c,
144 0x00400930,
145 0x00400934,
146 0x00400938,
147 0x0040093c,
148 0x00400940,
149 0x00400944,
150 0x00400948,
151 0x0040094c,
152 0x00400950,
153 0x00400954,
154 0x00400958,
155 0x0040095c,
156 0x00400960,
157 0x00400964,
158 0x00400968,
159 0x0040096c,
160 0x00400970,
161 0x00400974,
162 0x00400978,
163 0x0040097c,
164 0x00400980,
165 0x00400984,
166 0x00400988,
167 0x0040098c,
168 0x00400990,
169 0x00400994,
170 0x00400998,
171 0x0040099c,
172 0x004009a0,
173 0x004009a4,
174 0x004009a8,
175 0x004009ac,
176 0x004009b0,
177 0x004009b4,
178 0x004009b8,
179 0x004009bc,
180 0x004009c0,
181 0x004009c4,
182 0x004009c8,
183 0x004009cc,
184 0x004009d0,
185 0x004009d4,
186 0x004009d8,
187 0x004009dc,
188 0x004009e0,
189 0x004009e4,
190 0x004009e8,
191 0x004009ec,
192 0x004009f0,
193 0x004009f4,
194 0x004009f8,
195 0x004009fc,
196 NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */
197 0x0040080c,
198 NV04_PGRAPH_PATTERN_SHAPE,
199 NV03_PGRAPH_MONO_COLOR0,
200 NV04_PGRAPH_ROP3,
201 NV04_PGRAPH_CHROMA,
202 NV04_PGRAPH_BETA_AND,
203 NV04_PGRAPH_BETA_PREMULT,
204 0x00400e70,
205 0x00400e74,
206 0x00400e78,
207 0x00400e7c,
208 0x00400e80,
209 0x00400e84,
210 0x00400e88,
211 0x00400e8c,
212 0x00400ea0,
213 0x00400ea4,
214 0x00400ea8,
215 0x00400e90,
216 0x00400e94,
217 0x00400e98,
218 0x00400e9c,
219 NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
220 NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20-0x400f3c */
221 0x00400f04,
222 0x00400f24,
223 0x00400f08,
224 0x00400f28,
225 0x00400f0c,
226 0x00400f2c,
227 0x00400f10,
228 0x00400f30,
229 0x00400f14,
230 0x00400f34,
231 0x00400f18,
232 0x00400f38,
233 0x00400f1c,
234 0x00400f3c,
235 NV10_PGRAPH_XFMODE0,
236 NV10_PGRAPH_XFMODE1,
237 NV10_PGRAPH_GLOBALSTATE0,
238 NV10_PGRAPH_GLOBALSTATE1,
239 NV04_PGRAPH_STORED_FMT,
240 NV04_PGRAPH_SOURCE_COLOR,
241 NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */
242 NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */
243 0x00400404,
244 0x00400484,
245 0x00400408,
246 0x00400488,
247 0x0040040c,
248 0x0040048c,
249 0x00400410,
250 0x00400490,
251 0x00400414,
252 0x00400494,
253 0x00400418,
254 0x00400498,
255 0x0040041c,
256 0x0040049c,
257 0x00400420,
258 0x004004a0,
259 0x00400424,
260 0x004004a4,
261 0x00400428,
262 0x004004a8,
263 0x0040042c,
264 0x004004ac,
265 0x00400430,
266 0x004004b0,
267 0x00400434,
268 0x004004b4,
269 0x00400438,
270 0x004004b8,
271 0x0040043c,
272 0x004004bc,
273 0x00400440,
274 0x004004c0,
275 0x00400444,
276 0x004004c4,
277 0x00400448,
278 0x004004c8,
279 0x0040044c,
280 0x004004cc,
281 0x00400450,
282 0x004004d0,
283 0x00400454,
284 0x004004d4,
285 0x00400458,
286 0x004004d8,
287 0x0040045c,
288 0x004004dc,
289 0x00400460,
290 0x004004e0,
291 0x00400464,
292 0x004004e4,
293 0x00400468,
294 0x004004e8,
295 0x0040046c,
296 0x004004ec,
297 0x00400470,
298 0x004004f0,
299 0x00400474,
300 0x004004f4,
301 0x00400478,
302 0x004004f8,
303 0x0040047c,
304 0x004004fc,
305 NV03_PGRAPH_ABS_UCLIP_XMIN,
306 NV03_PGRAPH_ABS_UCLIP_XMAX,
307 NV03_PGRAPH_ABS_UCLIP_YMIN,
308 NV03_PGRAPH_ABS_UCLIP_YMAX,
309 0x00400550,
310 0x00400558,
311 0x00400554,
312 0x0040055c,
313 NV03_PGRAPH_ABS_UCLIPA_XMIN,
314 NV03_PGRAPH_ABS_UCLIPA_XMAX,
315 NV03_PGRAPH_ABS_UCLIPA_YMIN,
316 NV03_PGRAPH_ABS_UCLIPA_YMAX,
317 NV03_PGRAPH_ABS_ICLIP_XMAX,
318 NV03_PGRAPH_ABS_ICLIP_YMAX,
319 NV03_PGRAPH_XY_LOGIC_MISC0,
320 NV03_PGRAPH_XY_LOGIC_MISC1,
321 NV03_PGRAPH_XY_LOGIC_MISC2,
322 NV03_PGRAPH_XY_LOGIC_MISC3,
323 NV03_PGRAPH_CLIPX_0,
324 NV03_PGRAPH_CLIPX_1,
325 NV03_PGRAPH_CLIPY_0,
326 NV03_PGRAPH_CLIPY_1,
327 NV10_PGRAPH_COMBINER0_IN_ALPHA,
328 NV10_PGRAPH_COMBINER1_IN_ALPHA,
329 NV10_PGRAPH_COMBINER0_IN_RGB,
330 NV10_PGRAPH_COMBINER1_IN_RGB,
331 NV10_PGRAPH_COMBINER_COLOR0,
332 NV10_PGRAPH_COMBINER_COLOR1,
333 NV10_PGRAPH_COMBINER0_OUT_ALPHA,
334 NV10_PGRAPH_COMBINER1_OUT_ALPHA,
335 NV10_PGRAPH_COMBINER0_OUT_RGB,
336 NV10_PGRAPH_COMBINER1_OUT_RGB,
337 NV10_PGRAPH_COMBINER_FINAL0,
338 NV10_PGRAPH_COMBINER_FINAL1,
339 0x00400e00,
340 0x00400e04,
341 0x00400e08,
342 0x00400e0c,
343 0x00400e10,
344 0x00400e14,
345 0x00400e18,
346 0x00400e1c,
347 0x00400e20,
348 0x00400e24,
349 0x00400e28,
350 0x00400e2c,
351 0x00400e30,
352 0x00400e34,
353 0x00400e38,
354 0x00400e3c,
355 NV04_PGRAPH_PASSTHRU_0,
356 NV04_PGRAPH_PASSTHRU_1,
357 NV04_PGRAPH_PASSTHRU_2,
358 NV10_PGRAPH_DIMX_TEXTURE,
359 NV10_PGRAPH_WDIMX_TEXTURE,
360 NV10_PGRAPH_DVD_COLORFMT,
361 NV10_PGRAPH_SCALED_FORMAT,
362 NV04_PGRAPH_MISC24_0,
363 NV04_PGRAPH_MISC24_1,
364 NV04_PGRAPH_MISC24_2,
365 NV03_PGRAPH_X_MISC,
366 NV03_PGRAPH_Y_MISC,
367 NV04_PGRAPH_VALID1,
368 NV04_PGRAPH_VALID2,
369};
370
371static int nv17_graph_ctx_regs[] = {
372 NV10_PGRAPH_DEBUG_4,
373 0x004006b0,
374 0x00400eac,
375 0x00400eb0,
376 0x00400eb4,
377 0x00400eb8,
378 0x00400ebc,
379 0x00400ec0,
380 0x00400ec4,
381 0x00400ec8,
382 0x00400ecc,
383 0x00400ed0,
384 0x00400ed4,
385 0x00400ed8,
386 0x00400edc,
387 0x00400ee0,
388 0x00400a00,
389 0x00400a04,
390};
391
392struct nv10_graph_priv {
393 struct nouveau_graph base;
394 struct nv10_graph_chan *chan[32];
395 spinlock_t lock;
396};
397
398struct nv10_graph_chan {
399 struct nouveau_object base;
400 int chid;
401 int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
402 int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
403 struct pipe_state pipe_state;
404 u32 lma_window[4];
405};
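/*
 * nv10[] and nv17[] shadow the MMIO context registers index-for-index
 * with nv10_graph_ctx_regs[] and nv17_graph_ctx_regs[]; pipe_state and
 * lma_window capture the state that is only reachable through the
 * PIPE_ADDRESS/PIPE_DATA port and the celsius LMA methods, respectively.
 */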
406
407
408static inline struct nv10_graph_priv *
409nv10_graph_priv(struct nv10_graph_chan *chan)
410{
411 return (void *)nv_object(chan)->engine;
412}
413
414/*******************************************************************************
415 * Graphics object classes
416 ******************************************************************************/
417
418#define PIPE_SAVE(priv, state, addr) \
419 do { \
420 int __i; \
421 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr); \
422 for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
423 state[__i] = nv_rd32(priv, NV10_PGRAPH_PIPE_DATA); \
424 } while (0)
425
426#define PIPE_RESTORE(priv, state, addr) \
427 do { \
428 int __i; \
429 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr); \
430 for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
431 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, state[__i]); \
432 } while (0)
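/*
 * The 3D pipe is driven through an address/data register pair: the base
 * offset is written to NV10_PGRAPH_PIPE_ADDRESS once, and the hardware
 * auto-increments on each NV10_PGRAPH_PIPE_DATA access.  A minimal usage
 * sketch (buffer name and size are illustrative only;
 * nv17_graph_mthd_lma_window() below does exactly this for 0x64c0):
 *
 *	u32 tmp[8];
 *	PIPE_SAVE(priv, tmp, 0x64c0);		snapshot 8 words at 0x64c0
 *	...					clobber transform state
 *	PIPE_RESTORE(priv, tmp, 0x64c0);	write them back unchanged
 */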
433
434static struct nouveau_oclass
435nv10_graph_sclass[] = {
436 { 0x0012, &nv04_graph_ofuncs }, /* beta1 */
437 { 0x0019, &nv04_graph_ofuncs }, /* clip */
438 { 0x0030, &nv04_graph_ofuncs }, /* null */
439 { 0x0039, &nv04_graph_ofuncs }, /* m2mf */
440 { 0x0043, &nv04_graph_ofuncs }, /* rop */
441 { 0x0044, &nv04_graph_ofuncs }, /* pattern */
442 { 0x004a, &nv04_graph_ofuncs }, /* gdi */
443 { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
444 { 0x005f, &nv04_graph_ofuncs }, /* blit */
445 { 0x0062, &nv04_graph_ofuncs }, /* surf2d */
446 { 0x0072, &nv04_graph_ofuncs }, /* beta4 */
447 { 0x0089, &nv04_graph_ofuncs }, /* sifm */
448 { 0x008a, &nv04_graph_ofuncs }, /* ifc */
449 { 0x009f, &nv04_graph_ofuncs }, /* blit */
450 { 0x0093, &nv04_graph_ofuncs }, /* surf3d */
451 { 0x0094, &nv04_graph_ofuncs }, /* ttri */
452 { 0x0095, &nv04_graph_ofuncs }, /* mtri */
453 { 0x0056, &nv04_graph_ofuncs }, /* celsius */
454 {},
455};
456
457static struct nouveau_oclass
458nv15_graph_sclass[] = {
459 { 0x0012, &nv04_graph_ofuncs }, /* beta1 */
460 { 0x0019, &nv04_graph_ofuncs }, /* clip */
461 { 0x0030, &nv04_graph_ofuncs }, /* null */
462 { 0x0039, &nv04_graph_ofuncs }, /* m2mf */
463 { 0x0043, &nv04_graph_ofuncs }, /* rop */
464 { 0x0044, &nv04_graph_ofuncs }, /* pattern */
465 { 0x004a, &nv04_graph_ofuncs }, /* gdi */
466 { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
467 { 0x005f, &nv04_graph_ofuncs }, /* blit */
468 { 0x0062, &nv04_graph_ofuncs }, /* surf2d */
469 { 0x0072, &nv04_graph_ofuncs }, /* beta4 */
470 { 0x0089, &nv04_graph_ofuncs }, /* sifm */
471 { 0x008a, &nv04_graph_ofuncs }, /* ifc */
472 { 0x009f, &nv04_graph_ofuncs }, /* blit */
473 { 0x0093, &nv04_graph_ofuncs }, /* surf3d */
474 { 0x0094, &nv04_graph_ofuncs }, /* ttri */
475 { 0x0095, &nv04_graph_ofuncs }, /* mtri */
476 { 0x0096, &nv04_graph_ofuncs }, /* celsius */
477 {},
478};
479
480static int
481nv17_graph_mthd_lma_window(struct nouveau_object *object, u32 mthd,
482 void *args, u32 size)
483{
484 struct nv10_graph_chan *chan = (void *)object->parent;
485 struct nv10_graph_priv *priv = nv10_graph_priv(chan);
486 struct pipe_state *pipe = &chan->pipe_state;
487 u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
488 u32 xfmode0, xfmode1;
489 u32 data = *(u32 *)args;
490 int i;
491
492 chan->lma_window[(mthd - 0x1638) / 4] = data;
493
494 if (mthd != 0x1644)
495 return 0;
496
497 nv04_graph_idle(priv);
498
499 PIPE_SAVE(priv, pipe_0x0040, 0x0040);
500 PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
501
502 PIPE_RESTORE(priv, chan->lma_window, 0x6790);
503
504 nv04_graph_idle(priv);
505
506 xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
507 xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
508
509 PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
510 PIPE_SAVE(priv, pipe_0x64c0, 0x64c0);
511 PIPE_SAVE(priv, pipe_0x6ab0, 0x6ab0);
512 PIPE_SAVE(priv, pipe_0x6a80, 0x6a80);
513
514 nv04_graph_idle(priv);
515
516 nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
517 nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
518 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
519 for (i = 0; i < 4; i++)
520 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
521 for (i = 0; i < 4; i++)
522 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
523
524 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
525 for (i = 0; i < 3; i++)
526 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
527
528 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
529 for (i = 0; i < 3; i++)
530 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
531
532 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
533 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
534
535 PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
536
537 nv04_graph_idle(priv);
538
539 PIPE_RESTORE(priv, pipe_0x0040, 0x0040);
540
541 nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
542 nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
543
544 PIPE_RESTORE(priv, pipe_0x64c0, 0x64c0);
545 PIPE_RESTORE(priv, pipe_0x6ab0, 0x6ab0);
546 PIPE_RESTORE(priv, pipe_0x6a80, 0x6a80);
547 PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
548
549 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
550 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
551
552 nv04_graph_idle(priv);
553
554 return 0;
555}
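/*
 * Method-to-slot mapping assumed by the (mthd - 0x1638) / 4 index above
 * (matching the nv17_celcius_omthds table below):
 *   0x1638 -> lma_window[0]
 *   0x163c -> lma_window[1]
 *   0x1640 -> lma_window[2]
 *   0x1644 -> lma_window[3], the only write that triggers the pipe
 *             reload sequence.
 */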
556
557static int
558nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
559 void *args, u32 size)
560{
561 struct nv10_graph_chan *chan = (void *)object->parent;
562 struct nv10_graph_priv *priv = nv10_graph_priv(chan);
563
564 nv04_graph_idle(priv);
565
566 nv_mask(priv, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
567 nv_mask(priv, 0x4006b0, 0x08000000, 0x08000000);
568 return 0;
569}
570
571static struct nouveau_omthds
572nv17_celcius_omthds[] = {
573 { 0x1638, nv17_graph_mthd_lma_window },
574 { 0x163c, nv17_graph_mthd_lma_window },
575 { 0x1640, nv17_graph_mthd_lma_window },
576 { 0x1644, nv17_graph_mthd_lma_window },
577 { 0x1658, nv17_graph_mthd_lma_enable },
578 {}
579};
580
581static struct nouveau_oclass
582nv17_graph_sclass[] = {
583 { 0x0012, &nv04_graph_ofuncs }, /* beta1 */
584 { 0x0019, &nv04_graph_ofuncs }, /* clip */
585 { 0x0030, &nv04_graph_ofuncs }, /* null */
586 { 0x0039, &nv04_graph_ofuncs }, /* m2mf */
587 { 0x0043, &nv04_graph_ofuncs }, /* rop */
588 { 0x0044, &nv04_graph_ofuncs }, /* pattern */
589 { 0x004a, &nv04_graph_ofuncs }, /* gdi */
590 { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
591 { 0x005f, &nv04_graph_ofuncs }, /* blit */
592 { 0x0062, &nv04_graph_ofuncs }, /* surf2d */
593 { 0x0072, &nv04_graph_ofuncs }, /* beta4 */
594 { 0x0089, &nv04_graph_ofuncs }, /* sifm */
595 { 0x008a, &nv04_graph_ofuncs }, /* ifc */
596 { 0x009f, &nv04_graph_ofuncs }, /* blit */
597 { 0x0093, &nv04_graph_ofuncs }, /* surf3d */
598 { 0x0094, &nv04_graph_ofuncs }, /* ttri */
599 { 0x0095, &nv04_graph_ofuncs }, /* mtri */
600 { 0x0099, &nv04_graph_ofuncs, nv17_celcius_omthds }, /* celsius */
601 {},
602};
603
604/*******************************************************************************
605 * PGRAPH context
606 ******************************************************************************/
607
608static struct nv10_graph_chan *
609nv10_graph_channel(struct nv10_graph_priv *priv)
610{
611 struct nv10_graph_chan *chan = NULL;
612 if (nv_rd32(priv, 0x400144) & 0x00010000) {
613 int chid = nv_rd32(priv, 0x400148) >> 24;
614 if (chid < ARRAY_SIZE(priv->chan))
615 chan = priv->chan[chid];
616 }
617 return chan;
618}
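/*
 * Register decode relied on above (field names are descriptive guesses,
 * inferred from this usage rather than taken from the register headers):
 *   0x400144 bit 16       - a channel context is currently resident
 *   0x400148 bits [31:24] - id of that resident channel
 * The bounds check guards against ids beyond the 32 software slots in
 * priv->chan[].
 */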
619
620static void
621nv10_graph_save_pipe(struct nv10_graph_chan *chan)
622{
623 struct nv10_graph_priv *priv = nv10_graph_priv(chan);
624 struct pipe_state *pipe = &chan->pipe_state;
625
626 PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
627 PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
628 PIPE_SAVE(priv, pipe->pipe_0x6400, 0x6400);
629 PIPE_SAVE(priv, pipe->pipe_0x6800, 0x6800);
630 PIPE_SAVE(priv, pipe->pipe_0x6c00, 0x6c00);
631 PIPE_SAVE(priv, pipe->pipe_0x7000, 0x7000);
632 PIPE_SAVE(priv, pipe->pipe_0x7400, 0x7400);
633 PIPE_SAVE(priv, pipe->pipe_0x7800, 0x7800);
634 PIPE_SAVE(priv, pipe->pipe_0x0040, 0x0040);
635 PIPE_SAVE(priv, pipe->pipe_0x0000, 0x0000);
636}
637
638static void
639nv10_graph_load_pipe(struct nv10_graph_chan *chan)
640{
641 struct nv10_graph_priv *priv = nv10_graph_priv(chan);
642 struct pipe_state *pipe = &chan->pipe_state;
643 u32 xfmode0, xfmode1;
644 int i;
645
646 nv04_graph_idle(priv);
647 /* XXX check haiku comments */
648 xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
649 xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
650 nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
651 nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
652 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
653 for (i = 0; i < 4; i++)
654 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
655 for (i = 0; i < 4; i++)
656 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
657
658 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
659 for (i = 0; i < 3; i++)
660 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
661
662 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
663 for (i = 0; i < 3; i++)
664 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
665
666 nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
667 nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
668
669
670 PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
671 nv04_graph_idle(priv);
672
673 /* restore XFMODE */
674 nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
675 nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
676 PIPE_RESTORE(priv, pipe->pipe_0x6400, 0x6400);
677 PIPE_RESTORE(priv, pipe->pipe_0x6800, 0x6800);
678 PIPE_RESTORE(priv, pipe->pipe_0x6c00, 0x6c00);
679 PIPE_RESTORE(priv, pipe->pipe_0x7000, 0x7000);
680 PIPE_RESTORE(priv, pipe->pipe_0x7400, 0x7400);
681 PIPE_RESTORE(priv, pipe->pipe_0x7800, 0x7800);
682 PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
683 PIPE_RESTORE(priv, pipe->pipe_0x0000, 0x0000);
684 PIPE_RESTORE(priv, pipe->pipe_0x0040, 0x0040);
685 nv04_graph_idle(priv);
686}
687
688static void
689nv10_graph_create_pipe(struct nv10_graph_chan *chan)
690{
691 struct nv10_graph_priv *priv = nv10_graph_priv(chan);
692 struct pipe_state *pipe_state = &chan->pipe_state;
693 u32 *pipe_state_addr;
694 int i;
695#define PIPE_INIT(addr) \
696 do { \
697 pipe_state_addr = pipe_state->pipe_##addr; \
698 } while (0)
699#define PIPE_INIT_END(addr) \
700 do { \
701 u32 *__end_addr = pipe_state->pipe_##addr + \
702 ARRAY_SIZE(pipe_state->pipe_##addr); \
703 if (pipe_state_addr != __end_addr) \
704 nv_error(priv, "incomplete pipe init for 0x%x : %p/%p\n", \
705 addr, pipe_state_addr, __end_addr); \
706 } while (0)
707#define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value
708
709 PIPE_INIT(0x0200);
710 for (i = 0; i < 48; i++)
711 NV_WRITE_PIPE_INIT(0x00000000);
712 PIPE_INIT_END(0x0200);
713
714 PIPE_INIT(0x6400);
715 for (i = 0; i < 211; i++)
716 NV_WRITE_PIPE_INIT(0x00000000);
717 NV_WRITE_PIPE_INIT(0x3f800000);
718 NV_WRITE_PIPE_INIT(0x40000000);
719 NV_WRITE_PIPE_INIT(0x40000000);
720 NV_WRITE_PIPE_INIT(0x40000000);
721 NV_WRITE_PIPE_INIT(0x40000000);
722 NV_WRITE_PIPE_INIT(0x00000000);
723 NV_WRITE_PIPE_INIT(0x00000000);
724 NV_WRITE_PIPE_INIT(0x3f800000);
725 NV_WRITE_PIPE_INIT(0x00000000);
726 NV_WRITE_PIPE_INIT(0x3f000000);
727 NV_WRITE_PIPE_INIT(0x3f000000);
728 NV_WRITE_PIPE_INIT(0x00000000);
729 NV_WRITE_PIPE_INIT(0x00000000);
730 NV_WRITE_PIPE_INIT(0x00000000);
731 NV_WRITE_PIPE_INIT(0x00000000);
732 NV_WRITE_PIPE_INIT(0x3f800000);
733 NV_WRITE_PIPE_INIT(0x00000000);
734 NV_WRITE_PIPE_INIT(0x00000000);
735 NV_WRITE_PIPE_INIT(0x00000000);
736 NV_WRITE_PIPE_INIT(0x00000000);
737 NV_WRITE_PIPE_INIT(0x00000000);
738 NV_WRITE_PIPE_INIT(0x3f800000);
739 NV_WRITE_PIPE_INIT(0x3f800000);
740 NV_WRITE_PIPE_INIT(0x3f800000);
741 NV_WRITE_PIPE_INIT(0x3f800000);
742 PIPE_INIT_END(0x6400);
743
744 PIPE_INIT(0x6800);
745 for (i = 0; i < 162; i++)
746 NV_WRITE_PIPE_INIT(0x00000000);
747 NV_WRITE_PIPE_INIT(0x3f800000);
748 for (i = 0; i < 25; i++)
749 NV_WRITE_PIPE_INIT(0x00000000);
750 PIPE_INIT_END(0x6800);
751
752 PIPE_INIT(0x6c00);
753 NV_WRITE_PIPE_INIT(0x00000000);
754 NV_WRITE_PIPE_INIT(0x00000000);
755 NV_WRITE_PIPE_INIT(0x00000000);
756 NV_WRITE_PIPE_INIT(0x00000000);
757 NV_WRITE_PIPE_INIT(0xbf800000);
758 NV_WRITE_PIPE_INIT(0x00000000);
759 NV_WRITE_PIPE_INIT(0x00000000);
760 NV_WRITE_PIPE_INIT(0x00000000);
761 NV_WRITE_PIPE_INIT(0x00000000);
762 NV_WRITE_PIPE_INIT(0x00000000);
763 NV_WRITE_PIPE_INIT(0x00000000);
764 NV_WRITE_PIPE_INIT(0x00000000);
765 PIPE_INIT_END(0x6c00);
766
767 PIPE_INIT(0x7000);
768 NV_WRITE_PIPE_INIT(0x00000000);
769 NV_WRITE_PIPE_INIT(0x00000000);
770 NV_WRITE_PIPE_INIT(0x00000000);
771 NV_WRITE_PIPE_INIT(0x00000000);
772 NV_WRITE_PIPE_INIT(0x00000000);
773 NV_WRITE_PIPE_INIT(0x00000000);
774 NV_WRITE_PIPE_INIT(0x00000000);
775 NV_WRITE_PIPE_INIT(0x00000000);
776 NV_WRITE_PIPE_INIT(0x00000000);
777 NV_WRITE_PIPE_INIT(0x00000000);
778 NV_WRITE_PIPE_INIT(0x00000000);
779 NV_WRITE_PIPE_INIT(0x00000000);
780 NV_WRITE_PIPE_INIT(0x7149f2ca);
781 NV_WRITE_PIPE_INIT(0x00000000);
782 NV_WRITE_PIPE_INIT(0x00000000);
783 NV_WRITE_PIPE_INIT(0x00000000);
784 NV_WRITE_PIPE_INIT(0x7149f2ca);
785 NV_WRITE_PIPE_INIT(0x00000000);
786 NV_WRITE_PIPE_INIT(0x00000000);
787 NV_WRITE_PIPE_INIT(0x00000000);
788 NV_WRITE_PIPE_INIT(0x7149f2ca);
789 NV_WRITE_PIPE_INIT(0x00000000);
790 NV_WRITE_PIPE_INIT(0x00000000);
791 NV_WRITE_PIPE_INIT(0x00000000);
792 NV_WRITE_PIPE_INIT(0x7149f2ca);
793 NV_WRITE_PIPE_INIT(0x00000000);
794 NV_WRITE_PIPE_INIT(0x00000000);
795 NV_WRITE_PIPE_INIT(0x00000000);
796 NV_WRITE_PIPE_INIT(0x7149f2ca);
797 NV_WRITE_PIPE_INIT(0x00000000);
798 NV_WRITE_PIPE_INIT(0x00000000);
799 NV_WRITE_PIPE_INIT(0x00000000);
800 NV_WRITE_PIPE_INIT(0x7149f2ca);
801 NV_WRITE_PIPE_INIT(0x00000000);
802 NV_WRITE_PIPE_INIT(0x00000000);
803 NV_WRITE_PIPE_INIT(0x00000000);
804 NV_WRITE_PIPE_INIT(0x7149f2ca);
805 NV_WRITE_PIPE_INIT(0x00000000);
806 NV_WRITE_PIPE_INIT(0x00000000);
807 NV_WRITE_PIPE_INIT(0x00000000);
808 NV_WRITE_PIPE_INIT(0x7149f2ca);
809 for (i = 0; i < 35; i++)
810 NV_WRITE_PIPE_INIT(0x00000000);
811 PIPE_INIT_END(0x7000);
812
813 PIPE_INIT(0x7400);
814 for (i = 0; i < 48; i++)
815 NV_WRITE_PIPE_INIT(0x00000000);
816 PIPE_INIT_END(0x7400);
817
818 PIPE_INIT(0x7800);
819 for (i = 0; i < 48; i++)
820 NV_WRITE_PIPE_INIT(0x00000000);
821 PIPE_INIT_END(0x7800);
822
823 PIPE_INIT(0x4400);
824 for (i = 0; i < 32; i++)
825 NV_WRITE_PIPE_INIT(0x00000000);
826 PIPE_INIT_END(0x4400);
827
828 PIPE_INIT(0x0000);
829 for (i = 0; i < 16; i++)
830 NV_WRITE_PIPE_INIT(0x00000000);
831 PIPE_INIT_END(0x0000);
832
833 PIPE_INIT(0x0040);
834 for (i = 0; i < 4; i++)
835 NV_WRITE_PIPE_INIT(0x00000000);
836 PIPE_INIT_END(0x0040);
837
838#undef PIPE_INIT
839#undef PIPE_INIT_END
840#undef NV_WRITE_PIPE_INIT
841}
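/*
 * The three macros above implement a checked table fill: PIPE_INIT points
 * a write cursor at the start of one pipe_0x* array, NV_WRITE_PIPE_INIT
 * appends a word, and PIPE_INIT_END verifies the cursor landed exactly at
 * the end of the array, catching miscounted tables at init time.  The
 * invariant, sketched for a hypothetical 4-entry range pipe_0xabcd:
 *
 *	PIPE_INIT(0xabcd);
 *	for (i = 0; i < 4; i++)
 *		NV_WRITE_PIPE_INIT(0x00000000);
 *	PIPE_INIT_END(0xabcd);		silent: exactly 4 words written
 */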
842
843static int
844nv10_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
845{
846 int i;
847 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
848 if (nv10_graph_ctx_regs[i] == reg)
849 return i;
850 }
851 nv_error(priv, "unknown offset nv10_ctx_regs %d\n", reg);
852 return -1;
853}
854
855static int
856nv17_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
857{
858 int i;
859 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
860 if (nv17_graph_ctx_regs[i] == reg)
861 return i;
862 }
863 nv_error(priv, "unknown offset nv17_ctx_regs %d\n", reg);
864 return -1;
865}
866
867static void
868nv10_graph_load_dma_vtxbuf(struct nv10_graph_chan *chan, int chid, u32 inst)
869{
870 struct nv10_graph_priv *priv = nv10_graph_priv(chan);
871 u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
872 u32 ctx_user, ctx_switch[5];
873 int i, subchan = -1;
874
875 /* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
876 * that cannot be restored via MMIO. Do it through the FIFO
877 * instead.
878 */
879
880 /* Look for a celsius object */
881 for (i = 0; i < 8; i++) {
882 int class = nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
883
884 if (class == 0x56 || class == 0x96 || class == 0x99) {
885 subchan = i;
886 break;
887 }
888 }
889
890 if (subchan < 0 || !inst)
891 return;
892
893 /* Save the current ctx object */
894 ctx_user = nv_rd32(priv, NV10_PGRAPH_CTX_USER);
895 for (i = 0; i < 5; i++)
896 ctx_switch[i] = nv_rd32(priv, NV10_PGRAPH_CTX_SWITCH(i));
897
898 /* Save the FIFO state */
899 st2 = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2);
900 st2_dl = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DL);
901 st2_dh = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DH);
902 fifo_ptr = nv_rd32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR);
903
904 for (i = 0; i < ARRAY_SIZE(fifo); i++)
905 fifo[i] = nv_rd32(priv, 0x4007a0 + 4 * i);
906
907 /* Switch to the celsius subchannel */
908 for (i = 0; i < 5; i++)
909 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i),
910 nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(subchan, i)));
911 nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
912
913 /* Inject NV10TCL_DMA_VTXBUF */
914 nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
915 nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2,
916 0x2c000000 | chid << 20 | subchan << 16 | 0x18c);
917 nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
918 nv_mask(priv, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
919 nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
920 nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
921
922 /* Restore the FIFO state */
923 for (i = 0; i < ARRAY_SIZE(fifo); i++)
924 nv_wr32(priv, 0x4007a0 + 4 * i, fifo[i]);
925
926 nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
927 nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, st2);
928 nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
929 nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
930
931 /* Restore the current ctx object */
932 for (i = 0; i < 5; i++)
933 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
934 nv_wr32(priv, NV10_PGRAPH_CTX_USER, ctx_user);
935}
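/*
 * Layout of the FFINTFC_ST2 word assembled above, inferred from the
 * shifts (the 0x2c000000 flag bits are carried over as-is and are not
 * decoded here):
 *   [24:20] channel id   [18:16] subchannel   [12:0] method (0x18c)
 * FFINTFC_ST2_DL then carries the method's data word, i.e. the vtxbuf
 * instance address.
 */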
936
937static int
938nv10_graph_load_context(struct nv10_graph_chan *chan, int chid)
939{
940 struct nv10_graph_priv *priv = nv10_graph_priv(chan);
941 u32 inst;
942 int i;
943
944 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
945 nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]);
946
947 if (nv_device(priv)->chipset >= 0x17) {
948 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
949 nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]);
950 }
951
952 nv10_graph_load_pipe(chan);
953
954 inst = nv_rd32(priv, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
955 nv10_graph_load_dma_vtxbuf(chan, chid, inst);
956
957 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
958 nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
959 nv_mask(priv, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
960 return 0;
961}
962
963static int
964nv10_graph_unload_context(struct nv10_graph_chan *chan)
965{
966 struct nv10_graph_priv *priv = nv10_graph_priv(chan);
967 int i;
968
969 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
970 chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]);
971
972 if (nv_device(priv)->chipset >= 0x17) {
973 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
974 chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]);
975 }
976
977 nv10_graph_save_pipe(chan);
978
979 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
980 nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
981 return 0;
982}
983
984static void
985nv10_graph_context_switch(struct nv10_graph_priv *priv)
986{
987 struct nv10_graph_chan *prev = NULL;
988 struct nv10_graph_chan *next = NULL;
989 unsigned long flags;
990 int chid;
991
992 spin_lock_irqsave(&priv->lock, flags);
993 nv04_graph_idle(priv);
994
995 /* If previous context is valid, we need to save it */
996 prev = nv10_graph_channel(priv);
997 if (prev)
998 nv10_graph_unload_context(prev);
999
1000 /* load context for next channel */
1001 chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
1002 next = priv->chan[chid];
1003 if (next)
1004 nv10_graph_load_context(next, chid);
1005
1006 spin_unlock_irqrestore(&priv->lock, flags);
1007}
1008
1009#define NV_WRITE_CTX(reg, val) do { \
1010 int offset = nv10_graph_ctx_regs_find_offset(priv, reg); \
1011 if (offset >= 0) \
1012 chan->nv10[offset] = val; \
1013 } while (0)
1014
1015#define NV17_WRITE_CTX(reg, val) do { \
1016 int offset = nv17_graph_ctx_regs_find_offset(priv, reg); \
1017 if (offset >= 0) \
1018 chan->nv17[offset] = val; \
1019 } while (0)
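/*
 * Both find_offset helpers return -1 when the register is absent from the
 * context list, so slot 0 is a valid result; the tests above therefore
 * accept any non-negative offset.
 */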
1020
1021static int
1022nv10_graph_context_ctor(struct nouveau_object *parent,
1023 struct nouveau_object *engine,
1024 struct nouveau_oclass *oclass, void *data, u32 size,
1025 struct nouveau_object **pobject)
1026{
1027 struct nouveau_fifo_chan *fifo = (void *)parent;
1028 struct nv10_graph_priv *priv = (void *)engine;
1029 struct nv10_graph_chan *chan;
1030 unsigned long flags;
1031 int ret;
1032
1033 ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
1034 *pobject = nv_object(chan);
1035 if (ret)
1036 return ret;
1037
1038 spin_lock_irqsave(&priv->lock, flags);
1039 if (priv->chan[fifo->chid]) {
1040 *pobject = nv_object(priv->chan[fifo->chid]);
1041 atomic_inc(&(*pobject)->refcount);
1042 spin_unlock_irqrestore(&priv->lock, flags);
1043 nouveau_object_destroy(&chan->base);
1044 return 1;
1045 }
1046
1047 NV_WRITE_CTX(0x00400e88, 0x08000000);
1048 NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
1049 NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
1050 NV_WRITE_CTX(0x00400e10, 0x00001000);
1051 NV_WRITE_CTX(0x00400e14, 0x00001000);
1052 NV_WRITE_CTX(0x00400e30, 0x00080008);
1053 NV_WRITE_CTX(0x00400e34, 0x00080008);
1054 if (nv_device(priv)->chipset >= 0x17) {
1055 /* XXX: is this really needed? */
1056 NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
1057 nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
1058 NV17_WRITE_CTX(0x004006b0, nv_rd32(priv, 0x004006b0));
1059 NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
1060 NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
1061 NV17_WRITE_CTX(0x00400ec0, 0x00000080);
1062 NV17_WRITE_CTX(0x00400ed0, 0x00000080);
1063 }
1064 NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->chid << 24);
1065
1066 nv10_graph_create_pipe(chan);
1067
1068 priv->chan[fifo->chid] = chan;
1069 chan->chid = fifo->chid;
1070 spin_unlock_irqrestore(&priv->lock, flags);
1071 return 0;
1072}
1073
1074static void
1075nv10_graph_context_dtor(struct nouveau_object *object)
1076{
1077 struct nv10_graph_priv *priv = (void *)object->engine;
1078 struct nv10_graph_chan *chan = (void *)object;
1079 unsigned long flags;
1080
1081 spin_lock_irqsave(&priv->lock, flags);
1082 priv->chan[chan->chid] = NULL;
1083 spin_unlock_irqrestore(&priv->lock, flags);
1084
1085 nouveau_object_destroy(&chan->base);
1086}
1087
1088static int
1089nv10_graph_context_fini(struct nouveau_object *object, bool suspend)
1090{
1091 struct nv10_graph_priv *priv = (void *)object->engine;
1092 struct nv10_graph_chan *chan = (void *)object;
1093 unsigned long flags;
1094
1095 spin_lock_irqsave(&priv->lock, flags);
1096 nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
1097 if (nv10_graph_channel(priv) == chan)
1098 nv10_graph_unload_context(chan);
1099 nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
1100 spin_unlock_irqrestore(&priv->lock, flags);
1101
1102 return nouveau_object_fini(&chan->base, suspend);
1103}
1104
1105static struct nouveau_oclass
1106nv10_graph_cclass = {
1107 .handle = NV_ENGCTX(GR, 0x10),
1108 .ofuncs = &(struct nouveau_ofuncs) {
1109 .ctor = nv10_graph_context_ctor,
1110 .dtor = nv10_graph_context_dtor,
1111 .init = nouveau_object_init,
1112 .fini = nv10_graph_context_fini,
1113 },
1114};
1115
1116/*******************************************************************************
1117 * PGRAPH engine/subdev functions
1118 ******************************************************************************/
1119
1120static void
1121nv10_graph_tile_prog(struct nouveau_engine *engine, int i)
1122{
1123 struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
1124 struct nouveau_fifo *pfifo = nouveau_fifo(engine);
1125 struct nv10_graph_priv *priv = (void *)engine;
1126 unsigned long flags;
1127
1128 pfifo->pause(pfifo, &flags);
1129 nv04_graph_idle(priv);
1130
1131 nv_wr32(priv, NV10_PGRAPH_TLIMIT(i), tile->limit);
1132 nv_wr32(priv, NV10_PGRAPH_TSIZE(i), tile->pitch);
1133 nv_wr32(priv, NV10_PGRAPH_TILE(i), tile->addr);
1134
1135 pfifo->start(pfifo, &flags);
1136}
1137
1138const struct nouveau_bitfield nv10_graph_intr_name[] = {
1139 { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
1140 { NV_PGRAPH_INTR_ERROR, "ERROR" },
1141 {}
1142};
1143
1144const struct nouveau_bitfield nv10_graph_nstatus[] = {
1145 { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
1146 { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
1147 { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
1148 { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
1149 {}
1150};
1151
1152static void
1153nv10_graph_intr(struct nouveau_subdev *subdev)
1154{
1155 struct nv10_graph_priv *priv = (void *)subdev;
1156 struct nv10_graph_chan *chan = NULL;
1157 struct nouveau_namedb *namedb = NULL;
1158 struct nouveau_handle *handle = NULL;
1159 u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
1160 u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
1161 u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
1162 u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
1163 u32 chid = (addr & 0x01f00000) >> 20;
1164 u32 subc = (addr & 0x00070000) >> 16;
1165 u32 mthd = (addr & 0x00001ffc);
1166 u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
1167 u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
1168 u32 show = stat;
1169 unsigned long flags;
1170
1171 spin_lock_irqsave(&priv->lock, flags);
1172 chan = priv->chan[chid];
1173 if (chan)
1174 namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
1175 spin_unlock_irqrestore(&priv->lock, flags);
1176
1177 if (stat & NV_PGRAPH_INTR_ERROR) {
1178 if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
1179 handle = nouveau_namedb_get_class(namedb, class);
1180 if (handle && !nv_call(handle->object, mthd, data))
1181 show &= ~NV_PGRAPH_INTR_ERROR;
1182 }
1183 }
1184
1185 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1186 nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1187 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1188 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1189 nv10_graph_context_switch(priv);
1190 }
1191
1192 nv_wr32(priv, NV03_PGRAPH_INTR, stat);
1193 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
1194
1195 if (show) {
1196 nv_error(priv, "");
1197 nouveau_bitfield_print(nv10_graph_intr_name, show);
1198 printk(" nsource:");
1199 nouveau_bitfield_print(nv04_graph_nsource, nsource);
1200 printk(" nstatus:");
1201 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
1202 printk("\n");
1203 nv_error(priv, "ch %d/%d class 0x%04x "
1204 "mthd 0x%04x data 0x%08x\n",
1205 chid, subc, class, mthd, data);
1206 }
1207
1208 nouveau_namedb_put(handle);
1209}
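/*
 * Worked example of the TRAPPED_ADDR decode above, for a hypothetical
 * trapped address of 0x00b2018c:
 *   chid = (0x00b2018c & 0x01f00000) >> 20 = 0x0b   channel 11
 *   subc = (0x00b2018c & 0x00070000) >> 16 = 0x02   subchannel 2
 *   mthd =  0x00b2018c & 0x00001ffc        = 0x18c  (cf. NV10TCL_DMA_VTXBUF
 *                                                    above)
 */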
1210
1211static int
1212nv10_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1213 struct nouveau_oclass *oclass, void *data, u32 size,
1214 struct nouveau_object **pobject)
1215{
1216 struct nv10_graph_priv *priv;
1217 int ret;
1218
1219 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
1220 *pobject = nv_object(priv);
1221 if (ret)
1222 return ret;
1223
1224 nv_subdev(priv)->unit = 0x00001000;
1225 nv_subdev(priv)->intr = nv10_graph_intr;
1226 nv_engine(priv)->cclass = &nv10_graph_cclass;
1227
1228 if (nv_device(priv)->chipset <= 0x10)
1229 nv_engine(priv)->sclass = nv10_graph_sclass;
1230 else
1231 if (nv_device(priv)->chipset < 0x17 ||
1232 nv_device(priv)->chipset == 0x1a)
1233 nv_engine(priv)->sclass = nv15_graph_sclass;
1234 else
1235 nv_engine(priv)->sclass = nv17_graph_sclass;
1236
1237 nv_engine(priv)->tile_prog = nv10_graph_tile_prog;
1238 spin_lock_init(&priv->lock);
1239 return 0;
1240}
1241
1242static void
1243nv10_graph_dtor(struct nouveau_object *object)
1244{
1245 struct nv10_graph_priv *priv = (void *)object;
1246 nouveau_graph_destroy(&priv->base);
1247}
1248
1249static int
1250nv10_graph_init(struct nouveau_object *object)
1251{
1252 struct nouveau_engine *engine = nv_engine(object);
1253 struct nouveau_fb *pfb = nouveau_fb(object);
1254 struct nv10_graph_priv *priv = (void *)engine;
1255 int ret, i;
1256
1257 ret = nouveau_graph_init(&priv->base);
1258 if (ret)
1259 return ret;
1260
1261 nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
1262 nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
1263
1264 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
1265 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
1266 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
1267 /* nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
1268 nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
1269 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
1270
1271 if (nv_device(priv)->chipset >= 0x17) {
1272 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
1273 nv_wr32(priv, 0x400a10, 0x03ff3fb6);
1274 nv_wr32(priv, 0x400838, 0x002f8684);
1275 nv_wr32(priv, 0x40083c, 0x00115f3f);
1276 nv_wr32(priv, 0x4006b0, 0x40000020);
1277 } else {
1278 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
1279 }
1280
1281 /* Turn all the tiling regions off. */
1282 for (i = 0; i < pfb->tile.regions; i++)
1283 engine->tile_prog(engine, i);
1284
1285 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
1286 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
1287 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
1288 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
1289 nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
1290 nv_wr32(priv, NV10_PGRAPH_STATE, 0xFFFFFFFF);
1291
1292 nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
1293 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
1294 nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
1295 return 0;
1296}
1297
1298static int
1299nv10_graph_fini(struct nouveau_object *object, bool suspend)
1300{
1301 struct nv10_graph_priv *priv = (void *)object;
1302 return nouveau_graph_fini(&priv->base, suspend);
1303}
1304
1305struct nouveau_oclass
1306nv10_graph_oclass = {
1307 .handle = NV_ENGINE(GR, 0x10),
1308 .ofuncs = &(struct nouveau_ofuncs) {
1309 .ctor = nv10_graph_ctor,
1310 .dtor = nv10_graph_dtor,
1311 .init = nv10_graph_init,
1312 .fini = nv10_graph_fini,
1313 },
1314};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
new file mode 100644
index 000000000000..8f3f619c4a78
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -0,0 +1,381 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/handle.h>
5#include <core/enum.h>
6
7#include <subdev/timer.h>
8#include <subdev/fb.h>
9
10#include <engine/graph.h>
11#include <engine/fifo.h>
12
13#include "nv20.h"
14#include "regs.h"
15
16/*******************************************************************************
17 * Graphics object classes
18 ******************************************************************************/
19
20static struct nouveau_oclass
21nv20_graph_sclass[] = {
22 { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
23 { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
24 { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
25 { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
26 { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
27 { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
28 { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
29 { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
30 { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
31 { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
32 { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
33 { 0x0096, &nv04_graph_ofuncs, NULL }, /* celsius */
34 { 0x0097, &nv04_graph_ofuncs, NULL }, /* kelvin */
35 { 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
36 { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
37 {},
38};
39
40/*******************************************************************************
41 * PGRAPH context
42 ******************************************************************************/
43
44static int
45nv20_graph_context_ctor(struct nouveau_object *parent,
46 struct nouveau_object *engine,
47 struct nouveau_oclass *oclass, void *data, u32 size,
48 struct nouveau_object **pobject)
49{
50 struct nv20_graph_chan *chan;
51 int ret, i;
52
53 ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
54 0x37f0, 16, NVOBJ_FLAG_ZERO_ALLOC,
55 &chan);
56 *pobject = nv_object(chan);
57 if (ret)
58 return ret;
59
60 chan->chid = nouveau_fifo_chan(parent)->chid;
61
62 nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
63 nv_wo32(chan, 0x033c, 0xffff0000);
64 nv_wo32(chan, 0x03a0, 0x0fff0000);
65 nv_wo32(chan, 0x03a4, 0x0fff0000);
66 nv_wo32(chan, 0x047c, 0x00000101);
67 nv_wo32(chan, 0x0490, 0x00000111);
68 nv_wo32(chan, 0x04a8, 0x44400000);
69 for (i = 0x04d4; i <= 0x04e0; i += 4)
70 nv_wo32(chan, i, 0x00030303);
71 for (i = 0x04f4; i <= 0x0500; i += 4)
72 nv_wo32(chan, i, 0x00080000);
73 for (i = 0x050c; i <= 0x0518; i += 4)
74 nv_wo32(chan, i, 0x01012000);
75 for (i = 0x051c; i <= 0x0528; i += 4)
76 nv_wo32(chan, i, 0x000105b8);
77 for (i = 0x052c; i <= 0x0538; i += 4)
78 nv_wo32(chan, i, 0x00080008);
79 for (i = 0x055c; i <= 0x0598; i += 4)
80 nv_wo32(chan, i, 0x07ff0000);
81 nv_wo32(chan, 0x05a4, 0x4b7fffff);
82 nv_wo32(chan, 0x05fc, 0x00000001);
83 nv_wo32(chan, 0x0604, 0x00004000);
84 nv_wo32(chan, 0x0610, 0x00000001);
85 nv_wo32(chan, 0x0618, 0x00040000);
86 nv_wo32(chan, 0x061c, 0x00010000);
87 for (i = 0x1c1c; i <= 0x248c; i += 16) {
88 nv_wo32(chan, (i + 0), 0x10700ff9);
89 nv_wo32(chan, (i + 4), 0x0436086c);
90 nv_wo32(chan, (i + 8), 0x000c001b);
91 }
92 nv_wo32(chan, 0x281c, 0x3f800000);
93 nv_wo32(chan, 0x2830, 0x3f800000);
94 nv_wo32(chan, 0x285c, 0x40000000);
95 nv_wo32(chan, 0x2860, 0x3f800000);
96 nv_wo32(chan, 0x2864, 0x3f000000);
97 nv_wo32(chan, 0x286c, 0x40000000);
98 nv_wo32(chan, 0x2870, 0x3f800000);
99 nv_wo32(chan, 0x2878, 0xbf800000);
100 nv_wo32(chan, 0x2880, 0xbf800000);
101 nv_wo32(chan, 0x34a4, 0x000fe000);
102 nv_wo32(chan, 0x3530, 0x000003f8);
103 nv_wo32(chan, 0x3540, 0x002fe000);
104 for (i = 0x355c; i <= 0x3578; i += 4)
105 nv_wo32(chan, i, 0x001c527c);
106 return 0;
107}
108
109int
110nv20_graph_context_init(struct nouveau_object *object)
111{
112 struct nv20_graph_priv *priv = (void *)object->engine;
113 struct nv20_graph_chan *chan = (void *)object;
114 int ret;
115
116 ret = nouveau_graph_context_init(&chan->base);
117 if (ret)
118 return ret;
119
120 nv_wo32(priv->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
121 return 0;
122}
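/*
 * Assumed ctxtab layout, consistent with the 32 * 4 allocation in the
 * constructor below: one 32-bit entry per channel, holding the instance
 * address of that channel's context object in 16-byte units (hence the
 * >> 4, which matches the 16-byte alignment the context is created with).
 */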
123
124int
125nv20_graph_context_fini(struct nouveau_object *object, bool suspend)
126{
127 struct nv20_graph_priv *priv = (void *)object->engine;
128 struct nv20_graph_chan *chan = (void *)object;
129 int chid = -1;
130
131 nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
132 if (nv_rd32(priv, 0x400144) & 0x00010000)
133 chid = (nv_rd32(priv, 0x400148) & 0x1f000000) >> 24;
134 if (chan->chid == chid) {
135 nv_wr32(priv, 0x400784, nv_gpuobj(chan)->addr >> 4);
136 nv_wr32(priv, 0x400788, 0x00000002);
137 nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
138 nv_wr32(priv, 0x400144, 0x10000000);
139 nv_mask(priv, 0x400148, 0xff000000, 0x1f000000);
140 }
141 nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
142
143 nv_wo32(priv->ctxtab, chan->chid * 4, 0x00000000);
144 return nouveau_graph_context_fini(&chan->base, suspend);
145}
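/*
 * The sequence above forces a context save when this channel is the one
 * currently resident: 0x400784 takes the context instance address (again
 * in 16-byte units), writing 2 to 0x400788 kicks the transfer, 0x400700
 * polls to zero once PGRAPH has finished, and the status registers are
 * then reset to the "no channel" id 0x1f.  Register roles are inferred
 * from this usage, not from documentation.
 */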
146
147static struct nouveau_oclass
148nv20_graph_cclass = {
149 .handle = NV_ENGCTX(GR, 0x20),
150 .ofuncs = &(struct nouveau_ofuncs) {
151 .ctor = nv20_graph_context_ctor,
152 .dtor = _nouveau_graph_context_dtor,
153 .init = nv20_graph_context_init,
154 .fini = nv20_graph_context_fini,
155 .rd32 = _nouveau_graph_context_rd32,
156 .wr32 = _nouveau_graph_context_wr32,
157 },
158};
159
160/*******************************************************************************
161 * PGRAPH engine/subdev functions
162 ******************************************************************************/
163
164void
165nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
166{
167 struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
168 struct nouveau_fifo *pfifo = nouveau_fifo(engine);
169 struct nv20_graph_priv *priv = (void *)engine;
170 unsigned long flags;
171
172 pfifo->pause(pfifo, &flags);
173 nv04_graph_idle(priv);
174
175 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
176 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
177 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
178
179 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
180 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->limit);
181 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
182 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->pitch);
183 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
184 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
185
186 if (nv_device(engine)->card_type == NV_20) {
187 nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
188 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
189 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
190 }
191
192 pfifo->start(pfifo, &flags);
193}
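/*
 * The tile parameters are programmed twice: once into the live PGRAPH
 * registers and once through the RDI index/data port (0x00EA0030,
 * 0x00EA0050 and 0x00EA0010, each + 4 * i), which appears to refresh
 * PGRAPH's internal copy of the same state; the RDI writes track the MMIO
 * writes one-for-one.  The zcomp setup is limited to NV_20-core chips by
 * the card_type check.
 */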
194
195void
196nv20_graph_intr(struct nouveau_subdev *subdev)
197{
198 struct nouveau_engine *engine = nv_engine(subdev);
199 struct nouveau_object *engctx;
200 struct nouveau_handle *handle;
201 struct nv20_graph_priv *priv = (void *)subdev;
202 u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
203 u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
204 u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
205 u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
206 u32 chid = (addr & 0x01f00000) >> 20;
207 u32 subc = (addr & 0x00070000) >> 16;
208 u32 mthd = (addr & 0x00001ffc);
209 u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
210 u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
211 u32 show = stat;
212
213 engctx = nouveau_engctx_get(engine, chid);
214 if (stat & NV_PGRAPH_INTR_ERROR) {
215 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
216 handle = nouveau_handle_get_class(engctx, class);
217 if (handle && !nv_call(handle->object, mthd, data))
218 show &= ~NV_PGRAPH_INTR_ERROR;
219 nouveau_handle_put(handle);
220 }
221 }
222
223 nv_wr32(priv, NV03_PGRAPH_INTR, stat);
224 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
225
226 if (show) {
227 nv_info(priv, "");
228 nouveau_bitfield_print(nv10_graph_intr_name, show);
229 printk(" nsource:");
230 nouveau_bitfield_print(nv04_graph_nsource, nsource);
231 printk(" nstatus:");
232 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
233 printk("\n");
234 nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
235 chid, subc, class, mthd, data);
236 }
237
238 nouveau_engctx_put(engctx);
239}
240
241static int
242nv20_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
243 struct nouveau_oclass *oclass, void *data, u32 size,
244 struct nouveau_object **pobject)
245{
246 struct nv20_graph_priv *priv;
247 int ret;
248
249 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
250 *pobject = nv_object(priv);
251 if (ret)
252 return ret;
253
254 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
255 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
256 if (ret)
257 return ret;
258
259 nv_subdev(priv)->unit = 0x00001000;
260 nv_subdev(priv)->intr = nv20_graph_intr;
261 nv_engine(priv)->cclass = &nv20_graph_cclass;
262 nv_engine(priv)->sclass = nv20_graph_sclass;
263 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
264 return 0;
265}
266
267void
268nv20_graph_dtor(struct nouveau_object *object)
269{
270 struct nv20_graph_priv *priv = (void *)object;
271 nouveau_gpuobj_ref(NULL, &priv->ctxtab);
272 nouveau_graph_destroy(&priv->base);
273}
274
275int
276nv20_graph_init(struct nouveau_object *object)
277{
278 struct nouveau_engine *engine = nv_engine(object);
279 struct nv20_graph_priv *priv = (void *)engine;
280 struct nouveau_fb *pfb = nouveau_fb(object);
281 u32 tmp, vramsz;
282 int ret, i;
283
284 ret = nouveau_graph_init(&priv->base);
285 if (ret)
286 return ret;
287
288 nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
289
290 if (nv_device(priv)->chipset == 0x20) {
291 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
292 for (i = 0; i < 15; i++)
293 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
294 nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
295 } else {
296 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
297 for (i = 0; i < 32; i++)
298 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
299 nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
300 }
301
302 nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
303 nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
304
305 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
306 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
307 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
308 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
309 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
310 nv_wr32(priv, 0x40009C , 0x00000040);
311
312 if (nv_device(priv)->chipset >= 0x25) {
313 nv_wr32(priv, 0x400890, 0x00a8cfff);
314 nv_wr32(priv, 0x400610, 0x304B1FB6);
315 nv_wr32(priv, 0x400B80, 0x1cbd3883);
316 nv_wr32(priv, 0x400B84, 0x44000000);
317 nv_wr32(priv, 0x400098, 0x40000080);
318 nv_wr32(priv, 0x400B88, 0x000000ff);
319
320 } else {
321 nv_wr32(priv, 0x400880, 0x0008c7df);
322 nv_wr32(priv, 0x400094, 0x00000005);
323 nv_wr32(priv, 0x400B80, 0x45eae20e);
324 nv_wr32(priv, 0x400B84, 0x24000000);
325 nv_wr32(priv, 0x400098, 0x00000040);
326 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
327 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
328 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
329 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
330 }
331
332 /* Turn all the tiling regions off. */
333 for (i = 0; i < pfb->tile.regions; i++)
334 engine->tile_prog(engine, i);
335
336 nv_wr32(priv, 0x4009a0, nv_rd32(priv, 0x100324));
337 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
338 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, nv_rd32(priv, 0x100324));
339
340 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
341 nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);
342
343 tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) & 0x0007ff00;
344 nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
345 tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) | 0x00020100;
346 nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
347
348 /* begin RAM config */
349 vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
350 nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
351 nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
352 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
353 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100200));
354 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
355 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100204));
356 nv_wr32(priv, 0x400820, 0);
357 nv_wr32(priv, 0x400824, 0);
358 nv_wr32(priv, 0x400864, vramsz - 1);
359 nv_wr32(priv, 0x400868, vramsz - 1);
360
361 /* XXX: the writes below overwrite some of the tile setup above */
362 nv_wr32(priv, 0x400B20, 0x00000000);
363 nv_wr32(priv, 0x400B04, 0xFFFFFFFF);
364
365 nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
366 nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
367 nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
368 nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
369 return 0;
370}
371
372struct nouveau_oclass
373nv20_graph_oclass = {
374 .handle = NV_ENGINE(GR, 0x20),
375 .ofuncs = &(struct nouveau_ofuncs) {
376 .ctor = nv20_graph_ctor,
377 .dtor = nv20_graph_dtor,
378 .init = nv20_graph_init,
379 .fini = _nouveau_graph_fini,
380 },
381};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h
new file mode 100644
index 000000000000..2bea7313e03f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h
@@ -0,0 +1,31 @@
1#ifndef __NV20_GRAPH_H__
2#define __NV20_GRAPH_H__
3
4#include <core/enum.h>
5
6#include <engine/graph.h>
7#include <engine/fifo.h>
8
9struct nv20_graph_priv {
10 struct nouveau_graph base;
11 struct nouveau_gpuobj *ctxtab;
12};
13
14struct nv20_graph_chan {
15 struct nouveau_graph_chan base;
16 int chid;
17};
18
19extern struct nouveau_oclass nv25_graph_sclass[];
20int nv20_graph_context_init(struct nouveau_object *);
21int nv20_graph_context_fini(struct nouveau_object *, bool);
22
23void nv20_graph_tile_prog(struct nouveau_engine *, int);
24void nv20_graph_intr(struct nouveau_subdev *);
25
26void nv20_graph_dtor(struct nouveau_object *);
27int nv20_graph_init(struct nouveau_object *);
28
29int nv30_graph_init(struct nouveau_object *);
30
31#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
new file mode 100644
index 000000000000..b2b650dd8b28
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
@@ -0,0 +1,167 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include <engine/graph.h>
10
11#include "nv20.h"
12#include "regs.h"
13
14/*******************************************************************************
15 * Graphics object classes
16 ******************************************************************************/
17
18struct nouveau_oclass
19nv25_graph_sclass[] = {
20 { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
21 { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
22 { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
23 { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
24 { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
25 { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
26 { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
27 { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
28 { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
29 { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
30 { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
31 { 0x0096, &nv04_graph_ofuncs, NULL }, /* celsius */
32 { 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
33 { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
34 { 0x0597, &nv04_graph_ofuncs, NULL }, /* kelvin */
35 {},
36};
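/*
 * Relative to the nv20 class list, kelvin is exposed as 0x0597 here and
 * the 0x0097 entry is gone; the 2D classes are otherwise identical.
 */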
37
38/*******************************************************************************
39 * PGRAPH context
40 ******************************************************************************/
41
42static int
43nv25_graph_context_ctor(struct nouveau_object *parent,
44 struct nouveau_object *engine,
45 struct nouveau_oclass *oclass, void *data, u32 size,
46 struct nouveau_object **pobject)
47{
48 struct nv20_graph_chan *chan;
49 int ret, i;
50
51 ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x3724,
52 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
53 *pobject = nv_object(chan);
54 if (ret)
55 return ret;
56
57 chan->chid = nouveau_fifo_chan(parent)->chid;
58
59 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
60 nv_wo32(chan, 0x035c, 0xffff0000);
61 nv_wo32(chan, 0x03c0, 0x0fff0000);
62 nv_wo32(chan, 0x03c4, 0x0fff0000);
63 nv_wo32(chan, 0x049c, 0x00000101);
64 nv_wo32(chan, 0x04b0, 0x00000111);
65 nv_wo32(chan, 0x04c8, 0x00000080);
66 nv_wo32(chan, 0x04cc, 0xffff0000);
67 nv_wo32(chan, 0x04d0, 0x00000001);
68 nv_wo32(chan, 0x04e4, 0x44400000);
69 nv_wo32(chan, 0x04fc, 0x4b800000);
70 for (i = 0x0510; i <= 0x051c; i += 4)
71 nv_wo32(chan, i, 0x00030303);
72 for (i = 0x0530; i <= 0x053c; i += 4)
73 nv_wo32(chan, i, 0x00080000);
74 for (i = 0x0548; i <= 0x0554; i += 4)
75 nv_wo32(chan, i, 0x01012000);
76 for (i = 0x0558; i <= 0x0564; i += 4)
77 nv_wo32(chan, i, 0x000105b8);
78 for (i = 0x0568; i <= 0x0574; i += 4)
79 nv_wo32(chan, i, 0x00080008);
80 for (i = 0x0598; i <= 0x05d4; i += 4)
81 nv_wo32(chan, i, 0x07ff0000);
82 nv_wo32(chan, 0x05e0, 0x4b7fffff);
83 nv_wo32(chan, 0x0620, 0x00000080);
84 nv_wo32(chan, 0x0624, 0x30201000);
85 nv_wo32(chan, 0x0628, 0x70605040);
86 nv_wo32(chan, 0x062c, 0xb0a09080);
87 nv_wo32(chan, 0x0630, 0xf0e0d0c0);
88 nv_wo32(chan, 0x0664, 0x00000001);
89 nv_wo32(chan, 0x066c, 0x00004000);
90 nv_wo32(chan, 0x0678, 0x00000001);
91 nv_wo32(chan, 0x0680, 0x00040000);
92 nv_wo32(chan, 0x0684, 0x00010000);
93 for (i = 0x1b04; i <= 0x2374; i += 16) {
94 nv_wo32(chan, (i + 0), 0x10700ff9);
95 nv_wo32(chan, (i + 4), 0x0436086c);
96 nv_wo32(chan, (i + 8), 0x000c001b);
97 }
98 nv_wo32(chan, 0x2704, 0x3f800000);
99 nv_wo32(chan, 0x2718, 0x3f800000);
100 nv_wo32(chan, 0x2744, 0x40000000);
101 nv_wo32(chan, 0x2748, 0x3f800000);
102 nv_wo32(chan, 0x274c, 0x3f000000);
103 nv_wo32(chan, 0x2754, 0x40000000);
104 nv_wo32(chan, 0x2758, 0x3f800000);
105 nv_wo32(chan, 0x2760, 0xbf800000);
106 nv_wo32(chan, 0x2768, 0xbf800000);
107 nv_wo32(chan, 0x308c, 0x000fe000);
108 nv_wo32(chan, 0x3108, 0x000003f8);
109 nv_wo32(chan, 0x3468, 0x002fe000);
110 for (i = 0x3484; i <= 0x34a0; i += 4)
111 nv_wo32(chan, i, 0x001c527c);
112 return 0;
113}
114
115static struct nouveau_oclass
116nv25_graph_cclass = {
117 .handle = NV_ENGCTX(GR, 0x25),
118 .ofuncs = &(struct nouveau_ofuncs) {
119 .ctor = nv25_graph_context_ctor,
120 .dtor = _nouveau_graph_context_dtor,
121 .init = nv20_graph_context_init,
122 .fini = nv20_graph_context_fini,
123 .rd32 = _nouveau_graph_context_rd32,
124 .wr32 = _nouveau_graph_context_wr32,
125 },
126};
127
128/*******************************************************************************
129 * PGRAPH engine/subdev functions
130 ******************************************************************************/
131
132static int
133nv25_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
134 struct nouveau_oclass *oclass, void *data, u32 size,
135 struct nouveau_object **pobject)
136{
137 struct nv20_graph_priv *priv;
138 int ret;
139
140 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
141 *pobject = nv_object(priv);
142 if (ret)
143 return ret;
144
145 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
146 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
147 if (ret)
148 return ret;
149
150 nv_subdev(priv)->unit = 0x00001000;
151 nv_subdev(priv)->intr = nv20_graph_intr;
152 nv_engine(priv)->cclass = &nv25_graph_cclass;
153 nv_engine(priv)->sclass = nv25_graph_sclass;
154 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
155 return 0;
156}
157
158struct nouveau_oclass
159nv25_graph_oclass = {
160 .handle = NV_ENGINE(GR, 0x25),
161 .ofuncs = &(struct nouveau_ofuncs) {
162 .ctor = nv25_graph_ctor,
163 .dtor = nv20_graph_dtor,
164 .init = nv20_graph_init,
165 .fini = _nouveau_graph_fini,
166 },
167};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
new file mode 100644
index 000000000000..700462fa0ae0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
@@ -0,0 +1,134 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include <engine/graph.h>
10
11#include "nv20.h"
12#include "regs.h"
13
14/*******************************************************************************
15 * PGRAPH context
16 ******************************************************************************/
17
18static int
19nv2a_graph_context_ctor(struct nouveau_object *parent,
20 struct nouveau_object *engine,
21 struct nouveau_oclass *oclass, void *data, u32 size,
22 struct nouveau_object **pobject)
23{
24 struct nv20_graph_chan *chan;
25 int ret, i;
26
27 ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x36b0,
28 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
29 *pobject = nv_object(chan);
30 if (ret)
31 return ret;
32
33 chan->chid = nouveau_fifo_chan(parent)->chid;
34
35 nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
36 nv_wo32(chan, 0x033c, 0xffff0000);
37 nv_wo32(chan, 0x03a0, 0x0fff0000);
38 nv_wo32(chan, 0x03a4, 0x0fff0000);
39 nv_wo32(chan, 0x047c, 0x00000101);
40 nv_wo32(chan, 0x0490, 0x00000111);
41 nv_wo32(chan, 0x04a8, 0x44400000);
42 for (i = 0x04d4; i <= 0x04e0; i += 4)
43 nv_wo32(chan, i, 0x00030303);
44 for (i = 0x04f4; i <= 0x0500; i += 4)
45 nv_wo32(chan, i, 0x00080000);
46 for (i = 0x050c; i <= 0x0518; i += 4)
47 nv_wo32(chan, i, 0x01012000);
48 for (i = 0x051c; i <= 0x0528; i += 4)
49 nv_wo32(chan, i, 0x000105b8);
50 for (i = 0x052c; i <= 0x0538; i += 4)
51 nv_wo32(chan, i, 0x00080008);
52 for (i = 0x055c; i <= 0x0598; i += 4)
53 nv_wo32(chan, i, 0x07ff0000);
54 nv_wo32(chan, 0x05a4, 0x4b7fffff);
55 nv_wo32(chan, 0x05fc, 0x00000001);
56 nv_wo32(chan, 0x0604, 0x00004000);
57 nv_wo32(chan, 0x0610, 0x00000001);
58 nv_wo32(chan, 0x0618, 0x00040000);
59 nv_wo32(chan, 0x061c, 0x00010000);
60 for (i = 0x1a9c; i <= 0x22fc; i += 16) { /* XXX: check!! */
61 nv_wo32(chan, (i + 0), 0x10700ff9);
62 nv_wo32(chan, (i + 4), 0x0436086c);
63 nv_wo32(chan, (i + 8), 0x000c001b);
64 }
65 nv_wo32(chan, 0x269c, 0x3f800000);
66 nv_wo32(chan, 0x26b0, 0x3f800000);
67 nv_wo32(chan, 0x26dc, 0x40000000);
68 nv_wo32(chan, 0x26e0, 0x3f800000);
69 nv_wo32(chan, 0x26e4, 0x3f000000);
70 nv_wo32(chan, 0x26ec, 0x40000000);
71 nv_wo32(chan, 0x26f0, 0x3f800000);
72 nv_wo32(chan, 0x26f8, 0xbf800000);
73 nv_wo32(chan, 0x2700, 0xbf800000);
74 nv_wo32(chan, 0x3024, 0x000fe000);
75 nv_wo32(chan, 0x30a0, 0x000003f8);
76 nv_wo32(chan, 0x33fc, 0x002fe000);
77 for (i = 0x341c; i <= 0x3438; i += 4)
78 nv_wo32(chan, i, 0x001c527c);
79 return 0;
80}
81
82static struct nouveau_oclass
83nv2a_graph_cclass = {
84 .handle = NV_ENGCTX(GR, 0x2a),
85 .ofuncs = &(struct nouveau_ofuncs) {
86 .ctor = nv2a_graph_context_ctor,
87 .dtor = _nouveau_graph_context_dtor,
88 .init = nv20_graph_context_init,
89 .fini = nv20_graph_context_fini,
90 .rd32 = _nouveau_graph_context_rd32,
91 .wr32 = _nouveau_graph_context_wr32,
92 },
93};
94
95/*******************************************************************************
96 * PGRAPH engine/subdev functions
97 ******************************************************************************/
98
99static int
100nv2a_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
101 struct nouveau_oclass *oclass, void *data, u32 size,
102 struct nouveau_object **pobject)
103{
104 struct nv20_graph_priv *priv;
105 int ret;
106
107 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
108 *pobject = nv_object(priv);
109 if (ret)
110 return ret;
111
112 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
113 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
114 if (ret)
115 return ret;
116
117 nv_subdev(priv)->unit = 0x00001000;
118 nv_subdev(priv)->intr = nv20_graph_intr;
119 nv_engine(priv)->cclass = &nv2a_graph_cclass;
120 nv_engine(priv)->sclass = nv25_graph_sclass;
121 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
122 return 0;
123}
124
125struct nouveau_oclass
126nv2a_graph_oclass = {
127 .handle = NV_ENGINE(GR, 0x2a),
128 .ofuncs = &(struct nouveau_ofuncs) {
129 .ctor = nv2a_graph_ctor,
130 .dtor = nv20_graph_dtor,
131 .init = nv20_graph_init,
132 .fini = _nouveau_graph_fini,
133 },
134};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
new file mode 100644
index 000000000000..cedadaa92d3f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
@@ -0,0 +1,238 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include <engine/graph.h>
10
11#include "nv20.h"
12#include "regs.h"
13
14/*******************************************************************************
15 * Graphics object classes
16 ******************************************************************************/
17
18static struct nouveau_oclass
19nv30_graph_sclass[] = {
20 { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
21 { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
22 { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
23 { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
24 { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
25 { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
26 { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
27 { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
28 { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
29 { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
30 { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
31 { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
32 { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
33 { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
34 { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
35 { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
36 { 0x0397, &nv04_graph_ofuncs, NULL }, /* rankine */
37 {},
38};
39
40/*******************************************************************************
41 * PGRAPH context
42 ******************************************************************************/
43
44static int
45nv30_graph_context_ctor(struct nouveau_object *parent,
46 struct nouveau_object *engine,
47 struct nouveau_oclass *oclass, void *data, u32 size,
48 struct nouveau_object **pobject)
49{
50 struct nv20_graph_chan *chan;
51 int ret, i;
52
53 ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x5f48,
54 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
55 *pobject = nv_object(chan);
56 if (ret)
57 return ret;
58
59 chan->chid = nouveau_fifo_chan(parent)->chid;
60
61 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
62 nv_wo32(chan, 0x0410, 0x00000101);
63 nv_wo32(chan, 0x0424, 0x00000111);
64 nv_wo32(chan, 0x0428, 0x00000060);
65 nv_wo32(chan, 0x0444, 0x00000080);
66 nv_wo32(chan, 0x0448, 0xffff0000);
67 nv_wo32(chan, 0x044c, 0x00000001);
68 nv_wo32(chan, 0x0460, 0x44400000);
69 nv_wo32(chan, 0x048c, 0xffff0000);
70 for (i = 0x04e0; i < 0x04e8; i += 4)
71 nv_wo32(chan, i, 0x0fff0000);
72 nv_wo32(chan, 0x04ec, 0x00011100);
73 for (i = 0x0508; i < 0x0548; i += 4)
74 nv_wo32(chan, i, 0x07ff0000);
75 nv_wo32(chan, 0x0550, 0x4b7fffff);
76 nv_wo32(chan, 0x058c, 0x00000080);
77 nv_wo32(chan, 0x0590, 0x30201000);
78 nv_wo32(chan, 0x0594, 0x70605040);
79 nv_wo32(chan, 0x0598, 0xb8a89888);
80 nv_wo32(chan, 0x059c, 0xf8e8d8c8);
81 nv_wo32(chan, 0x05b0, 0xb0000000);
82 for (i = 0x0600; i < 0x0640; i += 4)
83 nv_wo32(chan, i, 0x00010588);
84 for (i = 0x0640; i < 0x0680; i += 4)
85 nv_wo32(chan, i, 0x00030303);
86 for (i = 0x06c0; i < 0x0700; i += 4)
87 nv_wo32(chan, i, 0x0008aae4);
88 for (i = 0x0700; i < 0x0740; i += 4)
89 nv_wo32(chan, i, 0x01012000);
90 for (i = 0x0740; i < 0x0780; i += 4)
91 nv_wo32(chan, i, 0x00080008);
92 nv_wo32(chan, 0x085c, 0x00040000);
93 nv_wo32(chan, 0x0860, 0x00010000);
94 for (i = 0x0864; i < 0x0874; i += 4)
95 nv_wo32(chan, i, 0x00040004);
96 for (i = 0x1f18; i <= 0x3088 ; i += 16) {
97 nv_wo32(chan, i + 0, 0x10700ff9);
98		nv_wo32(chan, i + 4, 0x0436086c);
99		nv_wo32(chan, i + 8, 0x000c001b);
100 }
101 for (i = 0x30b8; i < 0x30c8; i += 4)
102 nv_wo32(chan, i, 0x0000ffff);
103 nv_wo32(chan, 0x344c, 0x3f800000);
104 nv_wo32(chan, 0x3808, 0x3f800000);
105 nv_wo32(chan, 0x381c, 0x3f800000);
106 nv_wo32(chan, 0x3848, 0x40000000);
107 nv_wo32(chan, 0x384c, 0x3f800000);
108 nv_wo32(chan, 0x3850, 0x3f000000);
109 nv_wo32(chan, 0x3858, 0x40000000);
110 nv_wo32(chan, 0x385c, 0x3f800000);
111 nv_wo32(chan, 0x3864, 0xbf800000);
112 nv_wo32(chan, 0x386c, 0xbf800000);
113 return 0;
114}
115
116static struct nouveau_oclass
117nv30_graph_cclass = {
118 .handle = NV_ENGCTX(GR, 0x30),
119 .ofuncs = &(struct nouveau_ofuncs) {
120 .ctor = nv30_graph_context_ctor,
121 .dtor = _nouveau_graph_context_dtor,
122 .init = nv20_graph_context_init,
123 .fini = nv20_graph_context_fini,
124 .rd32 = _nouveau_graph_context_rd32,
125 .wr32 = _nouveau_graph_context_wr32,
126 },
127};
128
129/*******************************************************************************
130 * PGRAPH engine/subdev functions
131 ******************************************************************************/
132
133static int
134nv30_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
135 struct nouveau_oclass *oclass, void *data, u32 size,
136 struct nouveau_object **pobject)
137{
138 struct nv20_graph_priv *priv;
139 int ret;
140
141 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
142 *pobject = nv_object(priv);
143 if (ret)
144 return ret;
145
146 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
147 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
148 if (ret)
149 return ret;
150
151 nv_subdev(priv)->unit = 0x00001000;
152 nv_subdev(priv)->intr = nv20_graph_intr;
153 nv_engine(priv)->cclass = &nv30_graph_cclass;
154 nv_engine(priv)->sclass = nv30_graph_sclass;
155 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
156 return 0;
157}
158
159int
160nv30_graph_init(struct nouveau_object *object)
161{
162 struct nouveau_engine *engine = nv_engine(object);
163 struct nv20_graph_priv *priv = (void *)engine;
164 struct nouveau_fb *pfb = nouveau_fb(object);
165 int ret, i;
166
167 ret = nouveau_graph_init(&priv->base);
168 if (ret)
169 return ret;
170
171 nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
172
173 nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
174 nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
175
176 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
177 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
178 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
179 nv_wr32(priv, 0x400890, 0x01b463ff);
180 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
181 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
182 nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
183 nv_wr32(priv, 0x400B80, 0x1003d888);
184 nv_wr32(priv, 0x400B84, 0x0c000000);
185 nv_wr32(priv, 0x400098, 0x00000000);
186 nv_wr32(priv, 0x40009C, 0x0005ad00);
187 nv_wr32(priv, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
188 nv_wr32(priv, 0x4000a0, 0x00000000);
189 nv_wr32(priv, 0x4000a4, 0x00000008);
190 nv_wr32(priv, 0x4008a8, 0xb784a400);
191 nv_wr32(priv, 0x400ba0, 0x002f8685);
192 nv_wr32(priv, 0x400ba4, 0x00231f3f);
193 nv_wr32(priv, 0x4008a4, 0x40000020);
194
195 if (nv_device(priv)->chipset == 0x34) {
196 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
197 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00200201);
198 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
199 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000008);
200 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
201 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000032);
202 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
203 nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000002);
204 }
205
206 nv_wr32(priv, 0x4000c0, 0x00000016);
207
208 /* Turn all the tiling regions off. */
209 for (i = 0; i < pfb->tile.regions; i++)
210 engine->tile_prog(engine, i);
211
212 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
213 nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);
214 nv_wr32(priv, 0x0040075c , 0x00000001);
215
216 /* begin RAM config */
217 /* vramsz = pci_resource_len(priv->dev->pdev, 0) - 1; */
218 nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
219 nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
220 if (nv_device(priv)->chipset != 0x34) {
221 nv_wr32(priv, 0x400750, 0x00EA0000);
222 nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100200));
223 nv_wr32(priv, 0x400750, 0x00EA0004);
224 nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100204));
225 }
226 return 0;
227}
228
229struct nouveau_oclass
230nv30_graph_oclass = {
231 .handle = NV_ENGINE(GR, 0x30),
232 .ofuncs = &(struct nouveau_ofuncs) {
233 .ctor = nv30_graph_ctor,
234 .dtor = nv20_graph_dtor,
235 .init = nv30_graph_init,
236 .fini = _nouveau_graph_fini,
237 },
238};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
new file mode 100644
index 000000000000..273f6320027b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
@@ -0,0 +1,168 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include <engine/graph.h>
10
11#include "nv20.h"
12#include "regs.h"
13
14/*******************************************************************************
15 * Graphics object classes
16 ******************************************************************************/
17
18static struct nouveau_oclass
19nv34_graph_sclass[] = {
20 { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
21 { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
22 { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
23 { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
24 { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
25 { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
26 { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
27 { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
28 { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
29 { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
30 { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
31 { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
32 { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
33 { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
34 { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
35 { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
36 { 0x0697, &nv04_graph_ofuncs, NULL }, /* rankine */
37 {},
38};
39
40/*******************************************************************************
41 * PGRAPH context
42 ******************************************************************************/
43
44static int
45nv34_graph_context_ctor(struct nouveau_object *parent,
46 struct nouveau_object *engine,
47 struct nouveau_oclass *oclass, void *data, u32 size,
48 struct nouveau_object **pobject)
49{
50 struct nv20_graph_chan *chan;
51 int ret, i;
52
53 ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x46dc,
54 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
55 *pobject = nv_object(chan);
56 if (ret)
57 return ret;
58
59 chan->chid = nouveau_fifo_chan(parent)->chid;
60
61 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
62 nv_wo32(chan, 0x040c, 0x01000101);
63 nv_wo32(chan, 0x0420, 0x00000111);
64 nv_wo32(chan, 0x0424, 0x00000060);
65 nv_wo32(chan, 0x0440, 0x00000080);
66 nv_wo32(chan, 0x0444, 0xffff0000);
67 nv_wo32(chan, 0x0448, 0x00000001);
68 nv_wo32(chan, 0x045c, 0x44400000);
69 nv_wo32(chan, 0x0480, 0xffff0000);
70 for (i = 0x04d4; i < 0x04dc; i += 4)
71 nv_wo32(chan, i, 0x0fff0000);
72 nv_wo32(chan, 0x04e0, 0x00011100);
73 for (i = 0x04fc; i < 0x053c; i += 4)
74 nv_wo32(chan, i, 0x07ff0000);
75 nv_wo32(chan, 0x0544, 0x4b7fffff);
76 nv_wo32(chan, 0x057c, 0x00000080);
77 nv_wo32(chan, 0x0580, 0x30201000);
78 nv_wo32(chan, 0x0584, 0x70605040);
79 nv_wo32(chan, 0x0588, 0xb8a89888);
80 nv_wo32(chan, 0x058c, 0xf8e8d8c8);
81 nv_wo32(chan, 0x05a0, 0xb0000000);
82 for (i = 0x05f0; i < 0x0630; i += 4)
83 nv_wo32(chan, i, 0x00010588);
84 for (i = 0x0630; i < 0x0670; i += 4)
85 nv_wo32(chan, i, 0x00030303);
86 for (i = 0x06b0; i < 0x06f0; i += 4)
87 nv_wo32(chan, i, 0x0008aae4);
88 for (i = 0x06f0; i < 0x0730; i += 4)
89 nv_wo32(chan, i, 0x01012000);
90 for (i = 0x0730; i < 0x0770; i += 4)
91 nv_wo32(chan, i, 0x00080008);
92 nv_wo32(chan, 0x0850, 0x00040000);
93 nv_wo32(chan, 0x0854, 0x00010000);
94 for (i = 0x0858; i < 0x0868; i += 4)
95 nv_wo32(chan, i, 0x00040004);
96 for (i = 0x15ac; i <= 0x271c ; i += 16) {
97 nv_wo32(chan, i + 0, 0x10700ff9);
98		nv_wo32(chan, i + 4, 0x0436086c);
99		nv_wo32(chan, i + 8, 0x000c001b);
100 }
101 for (i = 0x274c; i < 0x275c; i += 4)
102 nv_wo32(chan, i, 0x0000ffff);
103 nv_wo32(chan, 0x2ae0, 0x3f800000);
104 nv_wo32(chan, 0x2e9c, 0x3f800000);
105 nv_wo32(chan, 0x2eb0, 0x3f800000);
106 nv_wo32(chan, 0x2edc, 0x40000000);
107 nv_wo32(chan, 0x2ee0, 0x3f800000);
108 nv_wo32(chan, 0x2ee4, 0x3f000000);
109 nv_wo32(chan, 0x2eec, 0x40000000);
110 nv_wo32(chan, 0x2ef0, 0x3f800000);
111 nv_wo32(chan, 0x2ef8, 0xbf800000);
112 nv_wo32(chan, 0x2f00, 0xbf800000);
113 return 0;
114}
115
116static struct nouveau_oclass
117nv34_graph_cclass = {
118 .handle = NV_ENGCTX(GR, 0x34),
119 .ofuncs = &(struct nouveau_ofuncs) {
120 .ctor = nv34_graph_context_ctor,
121 .dtor = _nouveau_graph_context_dtor,
122 .init = nv20_graph_context_init,
123 .fini = nv20_graph_context_fini,
124 .rd32 = _nouveau_graph_context_rd32,
125 .wr32 = _nouveau_graph_context_wr32,
126 },
127};
128
129/*******************************************************************************
130 * PGRAPH engine/subdev functions
131 ******************************************************************************/
132
133static int
134nv34_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
135 struct nouveau_oclass *oclass, void *data, u32 size,
136 struct nouveau_object **pobject)
137{
138 struct nv20_graph_priv *priv;
139 int ret;
140
141 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
142 *pobject = nv_object(priv);
143 if (ret)
144 return ret;
145
146 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
147 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
148 if (ret)
149 return ret;
150
151 nv_subdev(priv)->unit = 0x00001000;
152 nv_subdev(priv)->intr = nv20_graph_intr;
153 nv_engine(priv)->cclass = &nv34_graph_cclass;
154 nv_engine(priv)->sclass = nv34_graph_sclass;
155 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
156 return 0;
157}
158
159struct nouveau_oclass
160nv34_graph_oclass = {
161 .handle = NV_ENGINE(GR, 0x34),
162 .ofuncs = &(struct nouveau_ofuncs) {
163 .ctor = nv34_graph_ctor,
164 .dtor = nv20_graph_dtor,
165 .init = nv30_graph_init,
166 .fini = _nouveau_graph_fini,
167 },
168};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
new file mode 100644
index 000000000000..f40ee2116ee1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
@@ -0,0 +1,166 @@
1#include <core/os.h>
2#include <core/class.h>
3#include <core/engctx.h>
4#include <core/enum.h>
5
6#include <subdev/timer.h>
7#include <subdev/fb.h>
8
9#include "nv20.h"
10#include "regs.h"
11
12/*******************************************************************************
13 * Graphics object classes
14 ******************************************************************************/
15
16static struct nouveau_oclass
17nv35_graph_sclass[] = {
18 { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
19 { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
20 { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
21 { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
22 { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
23 { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
24 { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
25 { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
26 { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
27 { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
28 { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
29 { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
30 { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
31 { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
32 { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
33 { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
34 { 0x0497, &nv04_graph_ofuncs, NULL }, /* rankine */
35 {},
36};
37
38/*******************************************************************************
39 * PGRAPH context
40 ******************************************************************************/
41
42static int
43nv35_graph_context_ctor(struct nouveau_object *parent,
44 struct nouveau_object *engine,
45 struct nouveau_oclass *oclass, void *data, u32 size,
46 struct nouveau_object **pobject)
47{
48 struct nv20_graph_chan *chan;
49 int ret, i;
50
51 ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x577c,
52 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
53 *pobject = nv_object(chan);
54 if (ret)
55 return ret;
56
57 chan->chid = nouveau_fifo_chan(parent)->chid;
58
59 nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
60 nv_wo32(chan, 0x040c, 0x00000101);
61 nv_wo32(chan, 0x0420, 0x00000111);
62 nv_wo32(chan, 0x0424, 0x00000060);
63 nv_wo32(chan, 0x0440, 0x00000080);
64 nv_wo32(chan, 0x0444, 0xffff0000);
65 nv_wo32(chan, 0x0448, 0x00000001);
66 nv_wo32(chan, 0x045c, 0x44400000);
67 nv_wo32(chan, 0x0488, 0xffff0000);
68 for (i = 0x04dc; i < 0x04e4; i += 4)
69 nv_wo32(chan, i, 0x0fff0000);
70 nv_wo32(chan, 0x04e8, 0x00011100);
71 for (i = 0x0504; i < 0x0544; i += 4)
72 nv_wo32(chan, i, 0x07ff0000);
73 nv_wo32(chan, 0x054c, 0x4b7fffff);
74 nv_wo32(chan, 0x0588, 0x00000080);
75 nv_wo32(chan, 0x058c, 0x30201000);
76 nv_wo32(chan, 0x0590, 0x70605040);
77 nv_wo32(chan, 0x0594, 0xb8a89888);
78 nv_wo32(chan, 0x0598, 0xf8e8d8c8);
79 nv_wo32(chan, 0x05ac, 0xb0000000);
80 for (i = 0x0604; i < 0x0644; i += 4)
81 nv_wo32(chan, i, 0x00010588);
82 for (i = 0x0644; i < 0x0684; i += 4)
83 nv_wo32(chan, i, 0x00030303);
84 for (i = 0x06c4; i < 0x0704; i += 4)
85 nv_wo32(chan, i, 0x0008aae4);
86 for (i = 0x0704; i < 0x0744; i += 4)
87 nv_wo32(chan, i, 0x01012000);
88 for (i = 0x0744; i < 0x0784; i += 4)
89 nv_wo32(chan, i, 0x00080008);
90 nv_wo32(chan, 0x0860, 0x00040000);
91 nv_wo32(chan, 0x0864, 0x00010000);
92 for (i = 0x0868; i < 0x0878; i += 4)
93 nv_wo32(chan, i, 0x00040004);
94 for (i = 0x1f1c; i <= 0x308c ; i += 16) {
95 nv_wo32(chan, i + 0, 0x10700ff9);
96 nv_wo32(chan, i + 4, 0x0436086c);
97 nv_wo32(chan, i + 8, 0x000c001b);
98 }
99 for (i = 0x30bc; i < 0x30cc; i += 4)
100 nv_wo32(chan, i, 0x0000ffff);
101 nv_wo32(chan, 0x3450, 0x3f800000);
102 nv_wo32(chan, 0x380c, 0x3f800000);
103 nv_wo32(chan, 0x3820, 0x3f800000);
104 nv_wo32(chan, 0x384c, 0x40000000);
105 nv_wo32(chan, 0x3850, 0x3f800000);
106 nv_wo32(chan, 0x3854, 0x3f000000);
107 nv_wo32(chan, 0x385c, 0x40000000);
108 nv_wo32(chan, 0x3860, 0x3f800000);
109 nv_wo32(chan, 0x3868, 0xbf800000);
110 nv_wo32(chan, 0x3870, 0xbf800000);
111 return 0;
112}
113
114static struct nouveau_oclass
115nv35_graph_cclass = {
116 .handle = NV_ENGCTX(GR, 0x35),
117 .ofuncs = &(struct nouveau_ofuncs) {
118 .ctor = nv35_graph_context_ctor,
119 .dtor = _nouveau_graph_context_dtor,
120 .init = nv20_graph_context_init,
121 .fini = nv20_graph_context_fini,
122 .rd32 = _nouveau_graph_context_rd32,
123 .wr32 = _nouveau_graph_context_wr32,
124 },
125};
126
127/*******************************************************************************
128 * PGRAPH engine/subdev functions
129 ******************************************************************************/
130
131static int
132nv35_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
133 struct nouveau_oclass *oclass, void *data, u32 size,
134 struct nouveau_object **pobject)
135{
136 struct nv20_graph_priv *priv;
137 int ret;
138
139 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
140 *pobject = nv_object(priv);
141 if (ret)
142 return ret;
143
144 ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
145 NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
146 if (ret)
147 return ret;
148
149 nv_subdev(priv)->unit = 0x00001000;
150 nv_subdev(priv)->intr = nv20_graph_intr;
151 nv_engine(priv)->cclass = &nv35_graph_cclass;
152 nv_engine(priv)->sclass = nv35_graph_sclass;
153 nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
154 return 0;
155}
156
157struct nouveau_oclass
158nv35_graph_oclass = {
159 .handle = NV_ENGINE(GR, 0x35),
160 .ofuncs = &(struct nouveau_ofuncs) {
161 .ctor = nv35_graph_ctor,
162 .dtor = nv20_graph_dtor,
163 .init = nv30_graph_init,
164 .fini = _nouveau_graph_fini,
165 },
166};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
new file mode 100644
index 000000000000..8d0021049ec0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -0,0 +1,495 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/handle.h>
28#include <core/engctx.h>
29
30#include <subdev/fb.h>
31#include <subdev/timer.h>
32
33#include <engine/graph.h>
34#include <engine/fifo.h>
35
36#include "nv40.h"
37#include "regs.h"
38
39struct nv40_graph_priv {
40 struct nouveau_graph base;
41 u32 size;
42};
43
44struct nv40_graph_chan {
45 struct nouveau_graph_chan base;
46};
47
48/*******************************************************************************
49 * Graphics object classes
50 ******************************************************************************/
51
52static int
53nv40_graph_object_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct nouveau_gpuobj *obj;
59 int ret;
60
61 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
62 20, 16, 0, &obj);
63 *pobject = nv_object(obj);
64 if (ret)
65 return ret;
66
67 nv_wo32(obj, 0x00, nv_mclass(obj));
68 nv_wo32(obj, 0x04, 0x00000000);
69 nv_wo32(obj, 0x08, 0x00000000);
70#ifdef __BIG_ENDIAN
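	/* presumably flags the object for big-endian byte swapping by hw */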
71 nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
72#endif
73 nv_wo32(obj, 0x0c, 0x00000000);
74 nv_wo32(obj, 0x10, 0x00000000);
75 return 0;
76}
77
78static struct nouveau_ofuncs
79nv40_graph_ofuncs = {
80 .ctor = nv40_graph_object_ctor,
81 .dtor = _nouveau_gpuobj_dtor,
82 .init = _nouveau_gpuobj_init,
83 .fini = _nouveau_gpuobj_fini,
84 .rd32 = _nouveau_gpuobj_rd32,
85 .wr32 = _nouveau_gpuobj_wr32,
86};
87
88static struct nouveau_oclass
89nv40_graph_sclass[] = {
90 { 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
91 { 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
92 { 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
93 { 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
94 { 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
95 { 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
96 { 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
97 { 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
98 { 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
99 { 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
100 { 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
101 { 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
102 { 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
103 { 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
104 { 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
105 { 0x4097, &nv40_graph_ofuncs, NULL }, /* curie */
106 {},
107};
108
109static struct nouveau_oclass
110nv44_graph_sclass[] = {
111 { 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
112 { 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
113 { 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
114 { 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
115 { 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
116 { 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
117 { 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
118 { 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
119 { 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
120 { 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
121 { 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
122 { 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
123 { 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
124 { 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
125 { 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
126 { 0x4497, &nv40_graph_ofuncs, NULL }, /* curie */
127 {},
128};
129
130/*******************************************************************************
131 * PGRAPH context
132 ******************************************************************************/
133
134static int
135nv40_graph_context_ctor(struct nouveau_object *parent,
136 struct nouveau_object *engine,
137 struct nouveau_oclass *oclass, void *data, u32 size,
138 struct nouveau_object **pobject)
139{
140 struct nv40_graph_priv *priv = (void *)engine;
141 struct nv40_graph_chan *chan;
142 int ret;
143
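	/* priv->size is the grctx size computed by nv40_grctx_init() at
	 * engine init time; nv40_grctx_fill() copies the generated image
	 * in, and word 0 is set to the context's own instance address >> 4
	 */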
144 ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
145 priv->size, 16,
146 NVOBJ_FLAG_ZERO_ALLOC, &chan);
147 *pobject = nv_object(chan);
148 if (ret)
149 return ret;
150
151 nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan));
152 nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
153 return 0;
154}
155
156static int
157nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
158{
159	struct nv40_graph_priv *priv = (void *)object->engine;
160	struct nv40_graph_chan *chan = (void *)object;
161 u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
162 int ret = 0;
163
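	/* 0x40032c appears to hold the instance of the channel currently
	 * resident in PGRAPH (bit 24 flagging it valid); stop the ctxprog
	 * (0x400720 bit 0) before prodding it
	 */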
164 nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
165
166 if (nv_rd32(priv, 0x40032c) == inst) {
167 if (suspend) {
168 nv_wr32(priv, 0x400720, 0x00000000);
169 nv_wr32(priv, 0x400784, inst);
170 nv_mask(priv, 0x400310, 0x00000020, 0x00000020);
171 nv_mask(priv, 0x400304, 0x00000001, 0x00000001);
172 if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) {
173 u32 insn = nv_rd32(priv, 0x400308);
174 nv_warn(priv, "ctxprog timeout 0x%08x\n", insn);
175 ret = -EBUSY;
176 }
177 }
178
179 nv_mask(priv, 0x40032c, 0x01000000, 0x00000000);
180 }
181
182 if (nv_rd32(priv, 0x400330) == inst)
183 nv_mask(priv, 0x400330, 0x01000000, 0x00000000);
184
185 nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
186 return ret;
187}
188
189static struct nouveau_oclass
190nv40_graph_cclass = {
191 .handle = NV_ENGCTX(GR, 0x40),
192 .ofuncs = &(struct nouveau_ofuncs) {
193 .ctor = nv40_graph_context_ctor,
194 .dtor = _nouveau_graph_context_dtor,
195 .init = _nouveau_graph_context_init,
196 .fini = nv40_graph_context_fini,
197 .rd32 = _nouveau_graph_context_rd32,
198 .wr32 = _nouveau_graph_context_wr32,
199 },
200};
201
202/*******************************************************************************
203 * PGRAPH engine/subdev functions
204 ******************************************************************************/
205
206static void
207nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
208{
209 struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
210 struct nouveau_fifo *pfifo = nouveau_fifo(engine);
211 struct nv40_graph_priv *priv = (void *)engine;
212 unsigned long flags;
213
214 pfifo->pause(pfifo, &flags);
215 nv04_graph_idle(priv);
216
217 switch (nv_device(priv)->chipset) {
218 case 0x40:
219 case 0x41: /* guess */
220 case 0x42:
221 case 0x43:
222 case 0x45: /* guess */
223 case 0x4e:
224 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
225 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
226 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
227 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
228 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
229 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
230 break;
231 case 0x44:
232 case 0x4a:
233 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
234 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
235 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
236 break;
237 case 0x46:
238 case 0x47:
239 case 0x49:
240 case 0x4b:
241 case 0x4c:
242 case 0x67:
243 default:
244 nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
245 nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
246 nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
247 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
248 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
249 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
250 break;
251 }
252
253 pfifo->start(pfifo, &flags);
254}
255
256static void
257nv40_graph_intr(struct nouveau_subdev *subdev)
258{
259 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
260 struct nouveau_engine *engine = nv_engine(subdev);
261 struct nouveau_object *engctx;
262 struct nouveau_handle *handle = NULL;
263 struct nv40_graph_priv *priv = (void *)subdev;
264 u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
265 u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
266 u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
267 u32 inst = nv_rd32(priv, 0x40032c) & 0x000fffff;
268 u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
269 u32 subc = (addr & 0x00070000) >> 16;
270 u32 mthd = (addr & 0x00001ffc);
271 u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
272 u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff;
273 u32 show = stat;
274 int chid;
275
276 engctx = nouveau_engctx_get(engine, inst);
277 chid = pfifo->chid(pfifo, engctx);
278
279 if (stat & NV_PGRAPH_INTR_ERROR) {
280 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
281 handle = nouveau_handle_get_class(engctx, class);
282 if (handle && !nv_call(handle->object, mthd, data))
283 show &= ~NV_PGRAPH_INTR_ERROR;
284 nouveau_handle_put(handle);
285 }
286
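		/* the read+writeback of 0x402000 presumably acks the fault */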
287 if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
288 nv_mask(priv, 0x402000, 0, 0);
289 }
290 }
291
292 nv_wr32(priv, NV03_PGRAPH_INTR, stat);
293 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
294
295 if (show) {
296 nv_info(priv, "");
297 nouveau_bitfield_print(nv10_graph_intr_name, show);
298 printk(" nsource:");
299 nouveau_bitfield_print(nv04_graph_nsource, nsource);
300 printk(" nstatus:");
301 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
302 printk("\n");
303 nv_error(priv, "ch %d [0x%08x] subc %d class 0x%04x "
304 "mthd 0x%04x data 0x%08x\n",
305 chid, inst << 4, subc, class, mthd, data);
306 }
307
308 nouveau_engctx_put(engctx);
309}
310
311static int
312nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
313 struct nouveau_oclass *oclass, void *data, u32 size,
314 struct nouveau_object **pobject)
315{
316 struct nv40_graph_priv *priv;
317 int ret;
318
319 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
320 *pobject = nv_object(priv);
321 if (ret)
322 return ret;
323
324 nv_subdev(priv)->unit = 0x00001000;
325 nv_subdev(priv)->intr = nv40_graph_intr;
326 nv_engine(priv)->cclass = &nv40_graph_cclass;
327 if (nv44_graph_class(priv))
328 nv_engine(priv)->sclass = nv44_graph_sclass;
329 else
330 nv_engine(priv)->sclass = nv40_graph_sclass;
331 nv_engine(priv)->tile_prog = nv40_graph_tile_prog;
332 return 0;
333}
334
335static int
336nv40_graph_init(struct nouveau_object *object)
337{
338 struct nouveau_engine *engine = nv_engine(object);
339 struct nouveau_fb *pfb = nouveau_fb(object);
340 struct nv40_graph_priv *priv = (void *)engine;
341 int ret, i, j;
342 u32 vramsz;
343
344 ret = nouveau_graph_init(&priv->base);
345 if (ret)
346 return ret;
347
348 /* generate and upload context program */
349 nv40_grctx_init(nv_device(priv), &priv->size);
350
351 /* No context present currently */
352 nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
353
354 nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
355 nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
356
357 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
358 nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
359 nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
360 nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
361 nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
362 nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
363
364 nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
365 nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);
366
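	/* 0x1540 looks like a set of unit enable straps; write the index
	 * of the first enabled unit to 0x405000 (purpose unknown)
	 */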
367 j = nv_rd32(priv, 0x1540) & 0xff;
368 if (j) {
369 for (i = 0; !(j & 1); j >>= 1, i++)
370 ;
371 nv_wr32(priv, 0x405000, i);
372 }
373
374 if (nv_device(priv)->chipset == 0x40) {
375 nv_wr32(priv, 0x4009b0, 0x83280fff);
376 nv_wr32(priv, 0x4009b4, 0x000000a0);
377 } else {
378 nv_wr32(priv, 0x400820, 0x83280eff);
379 nv_wr32(priv, 0x400824, 0x000000a0);
380 }
381
382 switch (nv_device(priv)->chipset) {
383 case 0x40:
384 case 0x45:
385 nv_wr32(priv, 0x4009b8, 0x0078e366);
386 nv_wr32(priv, 0x4009bc, 0x0000014c);
387 break;
388 case 0x41:
389 case 0x42: /* pciid also 0x00Cx */
390 /* case 0x0120: XXX (pciid) */
391 nv_wr32(priv, 0x400828, 0x007596ff);
392 nv_wr32(priv, 0x40082c, 0x00000108);
393 break;
394 case 0x43:
395 nv_wr32(priv, 0x400828, 0x0072cb77);
396 nv_wr32(priv, 0x40082c, 0x00000108);
397 break;
398 case 0x44:
399 case 0x46: /* G72 */
400 case 0x4a:
401 case 0x4c: /* G7x-based C51 */
402 case 0x4e:
403 nv_wr32(priv, 0x400860, 0);
404 nv_wr32(priv, 0x400864, 0);
405 break;
406 case 0x47: /* G70 */
407 case 0x49: /* G71 */
408 case 0x4b: /* G73 */
409 nv_wr32(priv, 0x400828, 0x07830610);
410 nv_wr32(priv, 0x40082c, 0x0000016A);
411 break;
412 default:
413 break;
414 }
415
416 nv_wr32(priv, 0x400b38, 0x2ffff800);
417 nv_wr32(priv, 0x400b3c, 0x00006000);
418
419 /* Tiling related stuff. */
420 switch (nv_device(priv)->chipset) {
421 case 0x44:
422 case 0x4a:
423 nv_wr32(priv, 0x400bc4, 0x1003d888);
424 nv_wr32(priv, 0x400bbc, 0xb7a7b500);
425 break;
426 case 0x46:
427 nv_wr32(priv, 0x400bc4, 0x0000e024);
428 nv_wr32(priv, 0x400bbc, 0xb7a7b520);
429 break;
430 case 0x4c:
431 case 0x4e:
432 case 0x67:
433 nv_wr32(priv, 0x400bc4, 0x1003d888);
434 nv_wr32(priv, 0x400bbc, 0xb7a7b540);
435 break;
436 default:
437 break;
438 }
439
440 /* Turn all the tiling regions off. */
441 for (i = 0; i < pfb->tile.regions; i++)
442 engine->tile_prog(engine, i);
443
444 /* begin RAM config */
445 vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
446 switch (nv_device(priv)->chipset) {
447 case 0x40:
448 nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
449 nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
450 nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200));
451 nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204));
452 nv_wr32(priv, 0x400820, 0);
453 nv_wr32(priv, 0x400824, 0);
454 nv_wr32(priv, 0x400864, vramsz);
455 nv_wr32(priv, 0x400868, vramsz);
456 break;
457 default:
458 switch (nv_device(priv)->chipset) {
459 case 0x41:
460 case 0x42:
461 case 0x43:
462 case 0x45:
463 case 0x4e:
464 case 0x44:
465 case 0x4a:
466 nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200));
467 nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204));
468 break;
469 default:
470 nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200));
471 nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204));
472 break;
473 }
474 nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200));
475 nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204));
476 nv_wr32(priv, 0x400840, 0);
477 nv_wr32(priv, 0x400844, 0);
478 nv_wr32(priv, 0x4008A0, vramsz);
479 nv_wr32(priv, 0x4008A4, vramsz);
480 break;
481 }
482
483 return 0;
484}
485
486struct nouveau_oclass
487nv40_graph_oclass = {
488 .handle = NV_ENGINE(GR, 0x40),
489 .ofuncs = &(struct nouveau_ofuncs) {
490 .ctor = nv40_graph_ctor,
491 .dtor = _nouveau_graph_dtor,
492 .init = nv40_graph_init,
493 .fini = _nouveau_graph_fini,
494 },
495};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
new file mode 100644
index 000000000000..d2ac975afc2e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
@@ -0,0 +1,21 @@
1#ifndef __NV40_GRAPH_H__
2#define __NV40_GRAPH_H__
3
4/* returns 1 if device is one of the nv4x using the 0x4497 object class,
5 * helpful to determine a number of other hardware features
6 */
7static inline int
8nv44_graph_class(void *priv)
9{
10 struct nouveau_device *device = nv_device(priv);
11
12 if ((device->chipset & 0xf0) == 0x60)
13 return 1;
14
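	/* bits set in 0x0baf mark chipsets 0x40/41/42/43/45/47/48/49/4b,
	 * which use the older 0x4097 class; the rest are 0x4497-style
	 */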
15 return !(0x0baf & (1 << (device->chipset & 0x0f)));
16}
17
18void nv40_grctx_init(struct nouveau_device *, u32 *size);
19void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
20
21#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
new file mode 100644
index 000000000000..ab3b9dcaf478
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -0,0 +1,888 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/handle.h>
28#include <core/engctx.h>
29#include <core/enum.h>
30
31#include <subdev/fb.h>
32#include <subdev/vm.h>
33#include <subdev/timer.h>
34
35#include <engine/fifo.h>
36#include <engine/graph.h>
37
38#include "nv50.h"
39
40struct nv50_graph_priv {
41 struct nouveau_graph base;
42 spinlock_t lock;
43 u32 size;
44};
45
46struct nv50_graph_chan {
47 struct nouveau_graph_chan base;
48};
49
50/*******************************************************************************
51 * Graphics object classes
52 ******************************************************************************/
53
54static int
55nv50_graph_object_ctor(struct nouveau_object *parent,
56 struct nouveau_object *engine,
57 struct nouveau_oclass *oclass, void *data, u32 size,
58 struct nouveau_object **pobject)
59{
60 struct nouveau_gpuobj *obj;
61 int ret;
62
63 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
64 16, 16, 0, &obj);
65 *pobject = nv_object(obj);
66 if (ret)
67 return ret;
68
69 nv_wo32(obj, 0x00, nv_mclass(obj));
70 nv_wo32(obj, 0x04, 0x00000000);
71 nv_wo32(obj, 0x08, 0x00000000);
72 nv_wo32(obj, 0x0c, 0x00000000);
73 return 0;
74}
75
76static struct nouveau_ofuncs
77nv50_graph_ofuncs = {
78 .ctor = nv50_graph_object_ctor,
79 .dtor = _nouveau_gpuobj_dtor,
80 .init = _nouveau_gpuobj_init,
81 .fini = _nouveau_gpuobj_fini,
82 .rd32 = _nouveau_gpuobj_rd32,
83 .wr32 = _nouveau_gpuobj_wr32,
84};
85
86static struct nouveau_oclass
87nv50_graph_sclass[] = {
88	{ 0x0030, &nv50_graph_ofuncs }, /* null */
89	{ 0x502d, &nv50_graph_ofuncs }, /* 2d */
90	{ 0x5039, &nv50_graph_ofuncs }, /* m2mf */
91	{ 0x5097, &nv50_graph_ofuncs }, /* tesla (3d) */
92	{ 0x50c0, &nv50_graph_ofuncs }, /* compute */
93 {}
94};
95
96static struct nouveau_oclass
97nv84_graph_sclass[] = {
98	{ 0x0030, &nv50_graph_ofuncs }, /* null */
99	{ 0x502d, &nv50_graph_ofuncs }, /* 2d */
100	{ 0x5039, &nv50_graph_ofuncs }, /* m2mf */
101	{ 0x50c0, &nv50_graph_ofuncs }, /* compute */
102	{ 0x8297, &nv50_graph_ofuncs }, /* tesla (nv84) */
103 {}
104};
105
106static struct nouveau_oclass
107nva0_graph_sclass[] = {
108	{ 0x0030, &nv50_graph_ofuncs }, /* null */
109	{ 0x502d, &nv50_graph_ofuncs }, /* 2d */
110	{ 0x5039, &nv50_graph_ofuncs }, /* m2mf */
111	{ 0x50c0, &nv50_graph_ofuncs }, /* compute */
112	{ 0x8397, &nv50_graph_ofuncs }, /* tesla (nva0) */
113 {}
114};
115
116static struct nouveau_oclass
117nva3_graph_sclass[] = {
118	{ 0x0030, &nv50_graph_ofuncs }, /* null */
119	{ 0x502d, &nv50_graph_ofuncs }, /* 2d */
120	{ 0x5039, &nv50_graph_ofuncs }, /* m2mf */
121	{ 0x50c0, &nv50_graph_ofuncs }, /* compute */
122	{ 0x8597, &nv50_graph_ofuncs }, /* tesla (nva3) */
123	{ 0x85c0, &nv50_graph_ofuncs }, /* compute (nva3) */
124 {}
125};
126
127static struct nouveau_oclass
128nvaf_graph_sclass[] = {
129	{ 0x0030, &nv50_graph_ofuncs }, /* null */
130	{ 0x502d, &nv50_graph_ofuncs }, /* 2d */
131	{ 0x5039, &nv50_graph_ofuncs }, /* m2mf */
132	{ 0x50c0, &nv50_graph_ofuncs }, /* compute */
133	{ 0x85c0, &nv50_graph_ofuncs }, /* compute (nva3) */
134	{ 0x8697, &nv50_graph_ofuncs }, /* tesla (nvaf) */
135 {}
136};
137
138/*******************************************************************************
139 * PGRAPH context
140 ******************************************************************************/
141
142static int
143nv50_graph_context_ctor(struct nouveau_object *parent,
144 struct nouveau_object *engine,
145 struct nouveau_oclass *oclass, void *data, u32 size,
146 struct nouveau_object **pobject)
147{
148 struct nv50_graph_priv *priv = (void *)engine;
149 struct nv50_graph_chan *chan;
150 int ret;
151
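	/* priv->size is presumably filled in by nv50_grctx_init() at
	 * engine init time, as on nv40
	 */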
152 ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
153 priv->size, 0,
154 NVOBJ_FLAG_ZERO_ALLOC, &chan);
155 *pobject = nv_object(chan);
156 if (ret)
157 return ret;
158
159 nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan));
160 return 0;
161}
162
163static struct nouveau_oclass
164nv50_graph_cclass = {
165 .handle = NV_ENGCTX(GR, 0x50),
166 .ofuncs = &(struct nouveau_ofuncs) {
167 .ctor = nv50_graph_context_ctor,
168 .dtor = _nouveau_graph_context_dtor,
169 .init = _nouveau_graph_context_init,
170 .fini = _nouveau_graph_context_fini,
171 .rd32 = _nouveau_graph_context_rd32,
172 .wr32 = _nouveau_graph_context_wr32,
173 },
174};
175
176/*******************************************************************************
177 * PGRAPH engine/subdev functions
178 ******************************************************************************/
179
180static int
181nv50_graph_tlb_flush(struct nouveau_engine *engine)
182{
183 nv50_vm_flush_engine(&engine->base, 0x00);
184 return 0;
185}
186
187static int
188nv84_graph_tlb_flush(struct nouveau_engine *engine)
189{
190 struct nouveau_timer *ptimer = nouveau_timer(engine);
191 struct nv50_graph_priv *priv = (void *)engine;
192 bool idle, timeout = false;
193 unsigned long flags;
194 u64 start;
195 u32 tmp;
196
197 spin_lock_irqsave(&priv->lock, flags);
198 nv_mask(priv, 0x400500, 0x00000001, 0x00000000);
199
200 start = ptimer->read(ptimer);
201 do {
202 idle = true;
203
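		/* 0x400380/0x400384/0x400388 appear to hold 3-bit status
		 * codes per unit; a code of 1 seems to mean an outstanding
		 * memory access, so spin (up to 2 seconds) until none remain
		 */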
204 for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) {
205 if ((tmp & 7) == 1)
206 idle = false;
207 }
208
209 for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) {
210 if ((tmp & 7) == 1)
211 idle = false;
212 }
213
214 for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) {
215 if ((tmp & 7) == 1)
216 idle = false;
217 }
218 } while (!idle &&
219 !(timeout = ptimer->read(ptimer) - start > 2000000000));
220
221 if (timeout) {
222 nv_error(priv, "PGRAPH TLB flush idle timeout fail: "
223 "0x%08x 0x%08x 0x%08x 0x%08x\n",
224 nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380),
225 nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388));
226 }
227
228 nv50_vm_flush_engine(&engine->base, 0x00);
229
230 nv_mask(priv, 0x400500, 0x00000001, 0x00000001);
231 spin_unlock_irqrestore(&priv->lock, flags);
232 return timeout ? -EBUSY : 0;
233}
234
235static const struct nouveau_enum nv50_mp_exec_error_names[] = {
236 { 3, "STACK_UNDERFLOW", NULL },
237 { 4, "QUADON_ACTIVE", NULL },
238 { 8, "TIMEOUT", NULL },
239 { 0x10, "INVALID_OPCODE", NULL },
240 { 0x40, "BREAKPOINT", NULL },
241 {}
242};
243
244static const struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
245 { 0x00000001, "NOTIFY" },
246 { 0x00000002, "IN" },
247 { 0x00000004, "OUT" },
248 {}
249};
250
251static const struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
252 { 0x00000001, "FAULT" },
253 {}
254};
255
256static const struct nouveau_bitfield nv50_graph_trap_strmout[] = {
257 { 0x00000001, "FAULT" },
258 {}
259};
260
261static const struct nouveau_bitfield nv50_graph_trap_ccache[] = {
262 { 0x00000001, "FAULT" },
263 {}
264};
265
266/* There must be a *lot* of these. Will take some time to gather them up. */
267const struct nouveau_enum nv50_data_error_names[] = {
268 { 0x00000003, "INVALID_OPERATION", NULL },
269 { 0x00000004, "INVALID_VALUE", NULL },
270 { 0x00000005, "INVALID_ENUM", NULL },
271 { 0x00000008, "INVALID_OBJECT", NULL },
272 { 0x00000009, "READ_ONLY_OBJECT", NULL },
273 { 0x0000000a, "SUPERVISOR_OBJECT", NULL },
274 { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
275 { 0x0000000c, "INVALID_BITFIELD", NULL },
276 { 0x0000000d, "BEGIN_END_ACTIVE", NULL },
277 { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
278 { 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
279 { 0x00000010, "RT_DOUBLE_BIND", NULL },
280 { 0x00000011, "RT_TYPES_MISMATCH", NULL },
281 { 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
282 { 0x00000015, "FP_TOO_FEW_REGS", NULL },
283 { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
284 { 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
285 { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
286 { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
287 { 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
288 { 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
289 { 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
290 { 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
291 { 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
292 { 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
293 { 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
294 { 0x00000024, "VP_ZERO_INPUTS", NULL },
295 { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
296 { 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
297 { 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
298 { 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
299 { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
300 { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
301 { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
302 { 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
303 { 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
304 { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
305 { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
306 { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
307 { 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
308 { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
309 { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
310 {}
311};
312
313static const struct nouveau_bitfield nv50_graph_intr_name[] = {
314 { 0x00000001, "NOTIFY" },
315 { 0x00000002, "COMPUTE_QUERY" },
316 { 0x00000010, "ILLEGAL_MTHD" },
317 { 0x00000020, "ILLEGAL_CLASS" },
318 { 0x00000040, "DOUBLE_NOTIFY" },
319 { 0x00001000, "CONTEXT_SWITCH" },
320 { 0x00010000, "BUFFER_NOTIFY" },
321 { 0x00100000, "DATA_ERROR" },
322 { 0x00200000, "TRAP" },
323 { 0x01000000, "SINGLE_STEP" },
324 {}
325};
326
327static void
328nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
329{
330 u32 units = nv_rd32(priv, 0x1540);
331 u32 addr, mp10, status, pc, oplow, ophigh;
332 int i;
333 int mps = 0;
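	/* bits 24..27 of 0x1540 look like per-MP enables within each TP */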
334 for (i = 0; i < 4; i++) {
335 if (!(units & 1 << (i+24)))
336 continue;
337 if (nv_device(priv)->chipset < 0xa0)
338 addr = 0x408200 + (tpid << 12) + (i << 7);
339 else
340 addr = 0x408100 + (tpid << 11) + (i << 7);
341 mp10 = nv_rd32(priv, addr + 0x10);
342 status = nv_rd32(priv, addr + 0x14);
343 if (!status)
344 continue;
345 if (display) {
346 nv_rd32(priv, addr + 0x20);
347 pc = nv_rd32(priv, addr + 0x24);
348 oplow = nv_rd32(priv, addr + 0x70);
349 ophigh = nv_rd32(priv, addr + 0x74);
350 nv_error(priv, "TRAP_MP_EXEC - "
351 "TP %d MP %d: ", tpid, i);
352 nouveau_enum_print(nv50_mp_exec_error_names, status);
353 printk(" at %06x warp %d, opcode %08x %08x\n",
354 pc&0xffffff, pc >> 24,
355 oplow, ophigh);
356 }
357 nv_wr32(priv, addr + 0x10, mp10);
358 nv_wr32(priv, addr + 0x14, 0);
359 mps++;
360 }
361 if (!mps && display)
362 nv_error(priv, "TRAP_MP_EXEC - TP %d: "
363 "No MPs claiming errors?\n", tpid);
364}
365
366static void
367nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
368 u32 ustatus_new, int display, const char *name)
369{
370 int tps = 0;
371 u32 units = nv_rd32(priv, 0x1540);
372 int i, r;
373 u32 ustatus_addr, ustatus;
374 for (i = 0; i < 16; i++) {
375 if (!(units & (1 << i)))
376 continue;
377 if (nv_device(priv)->chipset < 0xa0)
378 ustatus_addr = ustatus_old + (i << 12);
379 else
380 ustatus_addr = ustatus_new + (i << 11);
381 ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff;
382 if (!ustatus)
383 continue;
384 tps++;
385 switch (type) {
386 case 6: /* texture error... unknown for now */
387 if (display) {
388 nv_error(priv, "magic set %d:\n", i);
389 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
390 nv_error(priv, "\t0x%08x: 0x%08x\n", r,
391 nv_rd32(priv, r));
392 }
393 break;
394 case 7: /* MP error */
395 if (ustatus & 0x04030000) {
396 nv50_priv_mp_trap(priv, i, display);
397 ustatus &= ~0x04030000;
398 }
399 break;
400 case 8: /* TPDMA error */
401 {
402 u32 e0c = nv_rd32(priv, ustatus_addr + 4);
403 u32 e10 = nv_rd32(priv, ustatus_addr + 8);
404 u32 e14 = nv_rd32(priv, ustatus_addr + 0xc);
405 u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
406 u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
407 u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
408 u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
409 /* 2d engine destination */
410 if (ustatus & 0x00000010) {
411 if (display) {
412 nv_error(priv, "TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
413 i, e14, e10);
414 nv_error(priv, "TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
415 i, e0c, e18, e1c, e20, e24);
416 }
417 ustatus &= ~0x00000010;
418 }
419 /* Render target */
420 if (ustatus & 0x00000040) {
421 if (display) {
422 nv_error(priv, "TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
423 i, e14, e10);
424 nv_error(priv, "TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
425 i, e0c, e18, e1c, e20, e24);
426 }
427 ustatus &= ~0x00000040;
428 }
429 /* CUDA memory: l[], g[] or stack. */
430 if (ustatus & 0x00000080) {
431 if (display) {
432 if (e18 & 0x80000000) {
433 /* g[] read fault? */
434 nv_error(priv, "TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
435 i, e14, e10 | ((e18 >> 24) & 0x1f));
436 e18 &= ~0x1f000000;
437 } else if (e18 & 0xc) {
438 /* g[] write fault? */
439 nv_error(priv, "TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
440 i, e14, e10 | ((e18 >> 7) & 0x1f));
441 e18 &= ~0x00000f80;
442 } else {
443 nv_error(priv, "TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
444 i, e14, e10);
445 }
446 nv_error(priv, "TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
447 i, e0c, e18, e1c, e20, e24);
448 }
449 ustatus &= ~0x00000080;
450 }
451 }
452 break;
453 }
454 if (ustatus) {
455 if (display)
456 nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
457 }
458 nv_wr32(priv, ustatus_addr, 0xc0000000);
459 }
460
461 if (!tps && display)
462 nv_info(priv, "%s - No TPs claiming errors?\n", name);
463}
464
465static int
466nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
467 int chid, u64 inst)
468{
469 u32 status = nv_rd32(priv, 0x400108);
470 u32 ustatus;
471
472 if (!status && display) {
473 nv_error(priv, "TRAP: no units reporting traps?\n");
474 return 1;
475 }
476
477 /* DISPATCH: Relays commands to other units and handles NOTIFY,
478 * COND, QUERY. If you get a trap from it, the command is still stuck
479 * in DISPATCH and you need to do something about it. */
480 if (status & 0x001) {
481 ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff;
482 if (!ustatus && display) {
483 nv_error(priv, "TRAP_DISPATCH - no ustatus?\n");
484 }
485
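		/* 0x400500 bit 0 seems to gate the PGRAPH fifo (the intr
		 * handler re-enables it with 0x00010001); stop it while the
		 * stuck command is inspected
		 */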
486 nv_wr32(priv, 0x400500, 0x00000000);
487
488 /* Known to be triggered by screwed up NOTIFY and COND... */
489 if (ustatus & 0x00000001) {
490 u32 addr = nv_rd32(priv, 0x400808);
491 u32 subc = (addr & 0x00070000) >> 16;
492 u32 mthd = (addr & 0x00001ffc);
493 u32 datal = nv_rd32(priv, 0x40080c);
494 u32 datah = nv_rd32(priv, 0x400810);
495 u32 class = nv_rd32(priv, 0x400814);
496 u32 r848 = nv_rd32(priv, 0x400848);
497
498 nv_error(priv, "TRAP DISPATCH_FAULT\n");
499 if (display && (addr & 0x80000000)) {
500 nv_error(priv, "ch %d [0x%010llx] "
501 "subc %d class 0x%04x mthd 0x%04x "
502 "data 0x%08x%08x "
503 "400808 0x%08x 400848 0x%08x\n",
504 chid, inst, subc, class, mthd, datah,
505 datal, addr, r848);
506 } else
507 if (display) {
508 nv_error(priv, "no stuck command?\n");
509 }
510
511 nv_wr32(priv, 0x400808, 0);
512 nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3);
513 nv_wr32(priv, 0x400848, 0);
514 ustatus &= ~0x00000001;
515 }
516
517 if (ustatus & 0x00000002) {
518 u32 addr = nv_rd32(priv, 0x40084c);
519 u32 subc = (addr & 0x00070000) >> 16;
520 u32 mthd = (addr & 0x00001ffc);
521 u32 data = nv_rd32(priv, 0x40085c);
522 u32 class = nv_rd32(priv, 0x400814);
523
524 nv_error(priv, "TRAP DISPATCH_QUERY\n");
525 if (display && (addr & 0x80000000)) {
526 nv_error(priv, "ch %d [0x%010llx] "
527 "subc %d class 0x%04x mthd 0x%04x "
528 "data 0x%08x 40084c 0x%08x\n",
529 chid, inst, subc, class, mthd,
530 data, addr);
531 } else
532 if (display) {
533 nv_error(priv, "no stuck command?\n");
534 }
535
536 nv_wr32(priv, 0x40084c, 0);
537 ustatus &= ~0x00000002;
538 }
539
540 if (ustatus && display) {
541 nv_error(priv, "TRAP_DISPATCH (unknown "
542 "0x%08x)\n", ustatus);
543 }
544
545 nv_wr32(priv, 0x400804, 0xc0000000);
546 nv_wr32(priv, 0x400108, 0x001);
547 status &= ~0x001;
548 if (!status)
549 return 0;
550 }
551
552 /* M2MF: Memory to memory copy engine. */
553 if (status & 0x002) {
554 u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff;
555 if (display) {
556 nv_error(priv, "TRAP_M2MF");
557 nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
558 printk("\n");
559 nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
560 nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
561 nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));
562
563 }
564
565 /* No sane way found yet -- just reset the bugger. */
566 nv_wr32(priv, 0x400040, 2);
567 nv_wr32(priv, 0x400040, 0);
568 nv_wr32(priv, 0x406800, 0xc0000000);
569 nv_wr32(priv, 0x400108, 0x002);
570 status &= ~0x002;
571 }
572
573 /* VFETCH: Fetches data from vertex buffers. */
574 if (status & 0x004) {
575 u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff;
576 if (display) {
577 nv_error(priv, "TRAP_VFETCH");
578 nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
579 printk("\n");
580 nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
581 nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
582 nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
583 }
584
585 nv_wr32(priv, 0x400c04, 0xc0000000);
586 nv_wr32(priv, 0x400108, 0x004);
587 status &= ~0x004;
588 }
589
590 /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
591 if (status & 0x008) {
592 ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff;
593 if (display) {
594 nv_error(priv, "TRAP_STRMOUT");
595 nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
596 printk("\n");
597 nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
598 nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
599 nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));
600
601 }
602
603 /* No sane way found yet -- just reset the bugger. */
604 nv_wr32(priv, 0x400040, 0x80);
605 nv_wr32(priv, 0x400040, 0);
606 nv_wr32(priv, 0x401800, 0xc0000000);
607 nv_wr32(priv, 0x400108, 0x008);
608 status &= ~0x008;
609 }
610
611 /* CCACHE: Handles code and c[] caches and fills them. */
612 if (status & 0x010) {
613 ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff;
614 if (display) {
615 nv_error(priv, "TRAP_CCACHE");
616 nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
617 printk("\n");
618 nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
619 " %08x %08x %08x\n",
620 nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
621 nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c),
622 nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014),
623 nv_rd32(priv, 0x40501c));
624
625 }
626
627 nv_wr32(priv, 0x405018, 0xc0000000);
628 nv_wr32(priv, 0x400108, 0x010);
629 status &= ~0x010;
630 }
631
632 /* Unknown, not seen yet... 0x402000 is the only trap status reg
633 * remaining, so try to handle it anyway. Perhaps related to that
634 * unknown DMA slot on tesla? */
635 if (status & 0x20) {
636 ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff;
637 if (display)
638 nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus);
639 nv_wr32(priv, 0x402000, 0xc0000000);
640		/* no status modification on purpose */
641 }
642
643 /* TEXTURE: CUDA texturing units */
644 if (status & 0x040) {
645 nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display,
646 "TRAP_TEXTURE");
647 nv_wr32(priv, 0x400108, 0x040);
648 status &= ~0x040;
649 }
650
651 /* MP: CUDA execution engines. */
652 if (status & 0x080) {
653 nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display,
654 "TRAP_MP");
655 nv_wr32(priv, 0x400108, 0x080);
656 status &= ~0x080;
657 }
658
659 /* TPDMA: Handles TP-initiated uncached memory accesses:
660 * l[], g[], stack, 2d surfaces, render targets. */
661 if (status & 0x100) {
662 nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
663 "TRAP_TPDMA");
664 nv_wr32(priv, 0x400108, 0x100);
665 status &= ~0x100;
666 }
667
668 if (status) {
669 if (display)
670 nv_error(priv, "TRAP: unknown 0x%08x\n", status);
671 nv_wr32(priv, 0x400108, status);
672 }
673
674 return 1;
675}
676
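/* Every unit block in the handler above ends with the same acknowledge
 * sequence: write 0xc0000000 to the unit's trap register to re-arm it,
 * ack the unit's bit in the global PGRAPH trap register 0x400108, and
 * drop the bit from the local status copy.  A minimal sketch of that
 * common tail, using the helpers from this file (the helper name itself
 * is illustrative, not part of the patch):
 *
 *	static void
 *	nv50_graph_trap_ack(struct nv50_graph_priv *priv, u32 treg,
 *			    u32 bit, u32 *status)
 *	{
 *		nv_wr32(priv, treg, 0xc0000000);
 *		nv_wr32(priv, 0x400108, bit);
 *		*status &= ~bit;
 *	}
 */
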
677static void
678nv50_graph_intr(struct nouveau_subdev *subdev)
679{
680 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
681 struct nouveau_engine *engine = nv_engine(subdev);
682 struct nouveau_object *engctx;
683 struct nouveau_handle *handle = NULL;
684 struct nv50_graph_priv *priv = (void *)subdev;
685 u32 stat = nv_rd32(priv, 0x400100);
686 u32 inst = nv_rd32(priv, 0x40032c) & 0x0fffffff;
687 u32 addr = nv_rd32(priv, 0x400704);
688 u32 subc = (addr & 0x00070000) >> 16;
689 u32 mthd = (addr & 0x00001ffc);
690 u32 data = nv_rd32(priv, 0x400708);
691 u32 class = nv_rd32(priv, 0x400814);
692 u32 show = stat;
693 int chid;
694
695 engctx = nouveau_engctx_get(engine, inst);
696 chid = pfifo->chid(pfifo, engctx);
697
698 if (stat & 0x00000010) {
699 handle = nouveau_handle_get_class(engctx, class);
700 if (handle && !nv_call(handle->object, mthd, data))
701 show &= ~0x00000010;
702 nouveau_handle_put(handle);
703 }
704
705 if (show & 0x00100000) {
706 u32 ecode = nv_rd32(priv, 0x400110);
707 nv_error(priv, "DATA_ERROR ");
708 nouveau_enum_print(nv50_data_error_names, ecode);
709 printk("\n");
710 }
711
712 if (stat & 0x00200000) {
713 if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12))
714 show &= ~0x00200000;
715 }
716
717 nv_wr32(priv, 0x400100, stat);
718 nv_wr32(priv, 0x400500, 0x00010001);
719
720 if (show) {
721 nv_info(priv, "");
722 nouveau_bitfield_print(nv50_graph_intr_name, show);
723 printk("\n");
724 nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
725 "mthd 0x%04x data 0x%08x\n",
726 chid, (u64)inst << 12, subc, class, mthd, data);
727 nv50_fb_trap(nouveau_fb(priv), 1);
728 }
729
730 if (nv_rd32(priv, 0x400824) & (1 << 31))
731 nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31));
732
733 nouveau_engctx_put(engctx);
734}
735
736static int
737nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
738 struct nouveau_oclass *oclass, void *data, u32 size,
739 struct nouveau_object **pobject)
740{
741 struct nv50_graph_priv *priv;
742 int ret;
743
744 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
745 *pobject = nv_object(priv);
746 if (ret)
747 return ret;
748
749 nv_subdev(priv)->unit = 0x00201000;
750 nv_subdev(priv)->intr = nv50_graph_intr;
751 nv_engine(priv)->cclass = &nv50_graph_cclass;
752
753 switch (nv_device(priv)->chipset) {
754 case 0x50:
755 nv_engine(priv)->sclass = nv50_graph_sclass;
756 break;
757 case 0x84:
758 case 0x86:
759 case 0x92:
760 case 0x94:
761 case 0x96:
762 case 0x98:
763 nv_engine(priv)->sclass = nv84_graph_sclass;
764 break;
765 case 0xa0:
766 case 0xaa:
767 case 0xac:
768 nv_engine(priv)->sclass = nva0_graph_sclass;
769 break;
770 case 0xa3:
771 case 0xa5:
772 case 0xa8:
773 nv_engine(priv)->sclass = nva3_graph_sclass;
774 break;
775 case 0xaf:
776 nv_engine(priv)->sclass = nvaf_graph_sclass;
777 break;
779	}
780
781 if (nv_device(priv)->chipset == 0x50 ||
782 nv_device(priv)->chipset == 0xac)
783 nv_engine(priv)->tlb_flush = nv50_graph_tlb_flush;
784 else
785 nv_engine(priv)->tlb_flush = nv84_graph_tlb_flush;
786
787 spin_lock_init(&priv->lock);
788 return 0;
789}
790
791static int
792nv50_graph_init(struct nouveau_object *object)
793{
794 struct nv50_graph_priv *priv = (void *)object;
795 int ret, units, i;
796
797 ret = nouveau_graph_init(&priv->base);
798 if (ret)
799 return ret;
800
801 /* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
802 nv_wr32(priv, 0x40008c, 0x00000004);
803
804 /* reset/enable traps and interrupts */
805 nv_wr32(priv, 0x400804, 0xc0000000);
806 nv_wr32(priv, 0x406800, 0xc0000000);
807 nv_wr32(priv, 0x400c04, 0xc0000000);
808 nv_wr32(priv, 0x401800, 0xc0000000);
809 nv_wr32(priv, 0x405018, 0xc0000000);
810 nv_wr32(priv, 0x402000, 0xc0000000);
811
812 units = nv_rd32(priv, 0x001540);
813 for (i = 0; i < 16; i++) {
814 if (!(units & (1 << i)))
815 continue;
816
817 if (nv_device(priv)->chipset < 0xa0) {
818 nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000);
819 nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000);
820 nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000);
821 } else {
822 nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000);
823 nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000);
824 nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000);
825 }
826 }
827
828 nv_wr32(priv, 0x400108, 0xffffffff);
829 nv_wr32(priv, 0x400138, 0xffffffff);
830 nv_wr32(priv, 0x400100, 0xffffffff);
831 nv_wr32(priv, 0x40013c, 0xffffffff);
832 nv_wr32(priv, 0x400500, 0x00010001);
833
834 /* upload context program, initialise ctxctl defaults */
835 ret = nv50_grctx_init(nv_device(priv), &priv->size);
836 if (ret)
837 return ret;
838
839 nv_wr32(priv, 0x400824, 0x00000000);
840 nv_wr32(priv, 0x400828, 0x00000000);
841 nv_wr32(priv, 0x40082c, 0x00000000);
842 nv_wr32(priv, 0x400830, 0x00000000);
843 nv_wr32(priv, 0x400724, 0x00000000);
844 nv_wr32(priv, 0x40032c, 0x00000000);
845 nv_wr32(priv, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */
846
847 /* some unknown zcull magic */
848 switch (nv_device(priv)->chipset & 0xf0) {
849 case 0x50:
850 case 0x80:
851 case 0x90:
852 nv_wr32(priv, 0x402ca8, 0x00000800);
853 break;
854 case 0xa0:
855 default:
856 nv_wr32(priv, 0x402cc0, 0x00000000);
857 if (nv_device(priv)->chipset == 0xa0 ||
858 nv_device(priv)->chipset == 0xaa ||
859 nv_device(priv)->chipset == 0xac) {
860 nv_wr32(priv, 0x402ca8, 0x00000802);
861 } else {
862 nv_wr32(priv, 0x402cc0, 0x00000000);
863 nv_wr32(priv, 0x402ca8, 0x00000002);
864 }
866 break;
867 }
868
869 /* zero out zcull regions */
870 for (i = 0; i < 8; i++) {
871 nv_wr32(priv, 0x402c20 + (i * 8), 0x00000000);
872 nv_wr32(priv, 0x402c24 + (i * 8), 0x00000000);
873 nv_wr32(priv, 0x402c28 + (i * 8), 0x00000000);
874 nv_wr32(priv, 0x402c2c + (i * 8), 0x00000000);
875 }
876 return 0;
877}
878
879struct nouveau_oclass
880nv50_graph_oclass = {
881 .handle = NV_ENGINE(GR, 0x50),
882 .ofuncs = &(struct nouveau_ofuncs) {
883 .ctor = nv50_graph_ctor,
884 .dtor = _nouveau_graph_dtor,
885 .init = nv50_graph_init,
886 .fini = _nouveau_graph_fini,
887 },
888};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h
new file mode 100644
index 000000000000..0505fb419bde
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h
@@ -0,0 +1,7 @@
1#ifndef __NV50_GRAPH_H__
2#define __NV50_GRAPH_H__
3
4int nv50_grctx_init(struct nouveau_device *, u32 *size);
5void nv50_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
6
7#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
new file mode 100644
index 000000000000..c62f2d0f5f0a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -0,0 +1,955 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26#include "fuc/hubnvc0.fuc.h"
27#include "fuc/gpcnvc0.fuc.h"
28
29/*******************************************************************************
30 * Graphics object classes
31 ******************************************************************************/
32
33static struct nouveau_oclass
34nvc0_graph_sclass[] = {
35 { 0x902d, &nouveau_object_ofuncs },
36 { 0x9039, &nouveau_object_ofuncs },
37 { 0x9097, &nouveau_object_ofuncs },
38 { 0x90c0, &nouveau_object_ofuncs },
39 {}
40};
41
42static struct nouveau_oclass
43nvc1_graph_sclass[] = {
44 { 0x902d, &nouveau_object_ofuncs },
45 { 0x9039, &nouveau_object_ofuncs },
46 { 0x9097, &nouveau_object_ofuncs },
47 { 0x90c0, &nouveau_object_ofuncs },
48 { 0x9197, &nouveau_object_ofuncs },
49 {}
50};
51
52static struct nouveau_oclass
53nvc8_graph_sclass[] = {
54 { 0x902d, &nouveau_object_ofuncs },
55 { 0x9039, &nouveau_object_ofuncs },
56 { 0x9097, &nouveau_object_ofuncs },
57 { 0x90c0, &nouveau_object_ofuncs },
58 { 0x9197, &nouveau_object_ofuncs },
59 { 0x9297, &nouveau_object_ofuncs },
60 {}
61};
62
63/*******************************************************************************
64 * PGRAPH context
65 ******************************************************************************/
66
67int
68nvc0_graph_context_ctor(struct nouveau_object *parent,
69 struct nouveau_object *engine,
70 struct nouveau_oclass *oclass, void *args, u32 size,
71 struct nouveau_object **pobject)
72{
73 struct nouveau_vm *vm = nouveau_client(parent)->vm;
74 struct nvc0_graph_priv *priv = (void *)engine;
75 struct nvc0_graph_data *data = priv->mmio_data;
76 struct nvc0_graph_mmio *mmio = priv->mmio_list;
77 struct nvc0_graph_chan *chan;
78 int ret, i;
79
80 /* allocate memory for context, and fill with default values */
81 ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
82 priv->size, 0x100,
83 NVOBJ_FLAG_ZERO_ALLOC, &chan);
84 *pobject = nv_object(chan);
85 if (ret)
86 return ret;
87
88 /* allocate memory for a "mmio list" buffer that's used by the HUB
89 * fuc to modify some per-context register settings on first load
90 * of the context.
91 */
92 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x100, 0, &chan->mmio);
93 if (ret)
94 return ret;
95
96 ret = nouveau_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
97 NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
98 &chan->mmio_vma);
99 if (ret)
100 return ret;
101
102 /* allocate buffers referenced by mmio list */
103 for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
104 ret = nouveau_gpuobj_new(parent, NULL, data->size, data->align,
105 0, &chan->data[i].mem);
106 if (ret)
107 return ret;
108
109 ret = nouveau_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
110 &chan->data[i].vma);
111 if (ret)
112 return ret;
113
114 data++;
115 }
116
117 /* finally, fill in the mmio list and point the context at it */
118 for (i = 0; mmio->addr && i < ARRAY_SIZE(priv->mmio_list); i++) {
119 u32 addr = mmio->addr;
120 u32 data = mmio->data;
121
122 if (mmio->shift) {
123 u64 info = chan->data[mmio->buffer].vma.offset;
124 data |= info >> mmio->shift;
125 }
126
127 nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
128 nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
129 mmio++;
130 }
131
132 for (i = 0; i < priv->size; i += 4)
133 nv_wo32(chan, i, priv->data[i / 4]);
134
135 if (!priv->firmware) {
136 nv_wo32(chan, 0x00, chan->mmio_nr / 2);
137 nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8);
138 } else {
139 nv_wo32(chan, 0xf4, 0);
140 nv_wo32(chan, 0xf8, 0);
141 nv_wo32(chan, 0x10, chan->mmio_nr / 2);
142 nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset));
143 nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset));
144 nv_wo32(chan, 0x1c, 1);
145 nv_wo32(chan, 0x20, 0);
146 nv_wo32(chan, 0x28, 0);
147 nv_wo32(chan, 0x2c, 0);
148 }
149
150 return 0;
151}
152
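/* The "mmio list" built above is a flat array of (register, value) word
 * pairs that the HUB ucode replays on first context load.  With two
 * entries, the buffer written by the loop would look like this (offsets
 * in bytes; register/value contents purely illustrative):
 *
 *	0x00: 0x00418810  <- first register address
 *	0x04: 0x80000012  <- value, possibly with a buffer VMA mixed in
 *	                     via the ->shift/->buffer fields
 *	0x08: 0x00419848  <- second register address
 *	0x0c: 0x10000012  <- value
 *
 * The context header then receives the pair count (mmio_nr / 2) and the
 * buffer's virtual address so the ucode can locate it.
 */
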
153void
154nvc0_graph_context_dtor(struct nouveau_object *object)
155{
156 struct nvc0_graph_chan *chan = (void *)object;
157 int i;
158
159 for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
160 nouveau_gpuobj_unmap(&chan->data[i].vma);
161 nouveau_gpuobj_ref(NULL, &chan->data[i].mem);
162 }
163
164 nouveau_gpuobj_unmap(&chan->mmio_vma);
165 nouveau_gpuobj_ref(NULL, &chan->mmio);
166
167 nouveau_graph_context_destroy(&chan->base);
168}
169
170static struct nouveau_oclass
171nvc0_graph_cclass = {
172 .ofuncs = &(struct nouveau_ofuncs) {
173 .ctor = nvc0_graph_context_ctor,
174 .dtor = nvc0_graph_context_dtor,
175 .init = _nouveau_graph_context_init,
176 .fini = _nouveau_graph_context_fini,
177 .rd32 = _nouveau_graph_context_rd32,
178 .wr32 = _nouveau_graph_context_wr32,
179 },
180};
181
182/*******************************************************************************
183 * PGRAPH engine/subdev functions
184 ******************************************************************************/
185
186static void
187nvc0_graph_ctxctl_debug_unit(struct nvc0_graph_priv *priv, u32 base)
188{
189 nv_error(priv, "%06x - done 0x%08x\n", base,
190 nv_rd32(priv, base + 0x400));
191 nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
192 nv_rd32(priv, base + 0x800), nv_rd32(priv, base + 0x804),
193 nv_rd32(priv, base + 0x808), nv_rd32(priv, base + 0x80c));
194 nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
195 nv_rd32(priv, base + 0x810), nv_rd32(priv, base + 0x814),
196 nv_rd32(priv, base + 0x818), nv_rd32(priv, base + 0x81c));
197}
198
199void
200nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *priv)
201{
202 u32 gpcnr = nv_rd32(priv, 0x409604) & 0xffff;
203 u32 gpc;
204
205 nvc0_graph_ctxctl_debug_unit(priv, 0x409000);
206 for (gpc = 0; gpc < gpcnr; gpc++)
207 nvc0_graph_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000));
208}
209
210static void
211nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
212{
213 u32 ustat = nv_rd32(priv, 0x409c18);
214
215 if (ustat & 0x00000001)
216 nv_error(priv, "CTXCTRL ucode error\n");
217 if (ustat & 0x00080000)
218 nv_error(priv, "CTXCTRL watchdog timeout\n");
219 if (ustat & ~0x00080001)
220 nv_error(priv, "CTXCTRL 0x%08x\n", ustat);
221
222 nvc0_graph_ctxctl_debug(priv);
223 nv_wr32(priv, 0x409c20, ustat);
224}
225
226static void
227nvc0_graph_trap_tpc(struct nvc0_graph_priv *priv, int gpc, int tpc)
228{
229 u32 stat = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0508));
230
231 if (stat & 0x00000001) {
232 u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0224));
233 nv_error(priv, "GPC%d/TPC%d/TEX: 0x%08x\n", gpc, tpc, trap);
234 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0224), 0xc0000000);
235 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000001);
236 stat &= ~0x00000001;
237 }
238
239 if (stat & 0x00000002) {
240 u32 trap0 = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0644));
241 u32 trap1 = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x064c));
242 nv_error(priv, "GPC%d/TPC%d/MP: 0x%08x 0x%08x\n",
243 gpc, tpc, trap0, trap1);
244 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0644), 0x001ffffe);
245 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x064c), 0x0000000f);
246 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000002);
247 stat &= ~0x00000002;
248 }
249
250 if (stat & 0x00000004) {
251 u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x0084));
252 nv_error(priv, "GPC%d/TPC%d/POLY: 0x%08x\n", gpc, tpc, trap);
253 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0084), 0xc0000000);
254 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000004);
255 stat &= ~0x00000004;
256 }
257
258 if (stat & 0x00000008) {
259 u32 trap = nv_rd32(priv, TPC_UNIT(gpc, tpc, 0x048c));
260 nv_error(priv, "GPC%d/TPC%d/L1C: 0x%08x\n", gpc, tpc, trap);
261 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x048c), 0xc0000000);
262 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), 0x00000008);
263 stat &= ~0x00000008;
264 }
265
266 if (stat) {
267 nv_error(priv, "GPC%d/TPC%d/0x%08x: unknown\n", gpc, tpc, stat);
268 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0508), stat);
269 }
270}
271
272static void
273nvc0_graph_trap_gpc(struct nvc0_graph_priv *priv, int gpc)
274{
275 u32 stat = nv_rd32(priv, GPC_UNIT(gpc, 0x2c90));
276 int tpc;
277
278 if (stat & 0x00000001) {
279 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0420));
280 nv_error(priv, "GPC%d/PROP: 0x%08x\n", gpc, trap);
281 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
282 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000001);
283 stat &= ~0x00000001;
284 }
285
286 if (stat & 0x00000002) {
287 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0900));
288 nv_error(priv, "GPC%d/ZCULL: 0x%08x\n", gpc, trap);
289 nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
290 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000002);
291 stat &= ~0x00000002;
292 }
293
294 if (stat & 0x00000004) {
295 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x1028));
296 nv_error(priv, "GPC%d/CCACHE: 0x%08x\n", gpc, trap);
297 nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
298 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000004);
299 stat &= ~0x00000004;
300 }
301
302 if (stat & 0x00000008) {
303 u32 trap = nv_rd32(priv, GPC_UNIT(gpc, 0x0824));
304 nv_error(priv, "GPC%d/ESETUP: 0x%08x\n", gpc, trap);
305 nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
306 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0x00000008);
307		stat &= ~0x00000008;
308 }
309
310 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
311 u32 mask = 0x00010000 << tpc;
312 if (stat & mask) {
313 nvc0_graph_trap_tpc(priv, gpc, tpc);
314 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), mask);
315 stat &= ~mask;
316 }
317 }
318
319 if (stat) {
320 nv_error(priv, "GPC%d/0x%08x: unknown\n", gpc, stat);
321 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), stat);
322 }
323}
324
325static void
326nvc0_graph_trap_intr(struct nvc0_graph_priv *priv)
327{
328 u32 trap = nv_rd32(priv, 0x400108);
329 int rop, gpc;
330
331 if (trap & 0x00000001) {
332 u32 stat = nv_rd32(priv, 0x404000);
333 nv_error(priv, "DISPATCH 0x%08x\n", stat);
334 nv_wr32(priv, 0x404000, 0xc0000000);
335 nv_wr32(priv, 0x400108, 0x00000001);
336 trap &= ~0x00000001;
337 }
338
339 if (trap & 0x00000002) {
340 u32 stat = nv_rd32(priv, 0x404600);
341 nv_error(priv, "M2MF 0x%08x\n", stat);
342 nv_wr32(priv, 0x404600, 0xc0000000);
343 nv_wr32(priv, 0x400108, 0x00000002);
344 trap &= ~0x00000002;
345 }
346
347 if (trap & 0x00000008) {
348 u32 stat = nv_rd32(priv, 0x408030);
349 nv_error(priv, "CCACHE 0x%08x\n", stat);
350 nv_wr32(priv, 0x408030, 0xc0000000);
351 nv_wr32(priv, 0x400108, 0x00000008);
352 trap &= ~0x00000008;
353 }
354
355 if (trap & 0x00000010) {
356 u32 stat = nv_rd32(priv, 0x405840);
357 nv_error(priv, "SHADER 0x%08x\n", stat);
358 nv_wr32(priv, 0x405840, 0xc0000000);
359 nv_wr32(priv, 0x400108, 0x00000010);
360 trap &= ~0x00000010;
361 }
362
363 if (trap & 0x00000040) {
364 u32 stat = nv_rd32(priv, 0x40601c);
365 nv_error(priv, "UNK6 0x%08x\n", stat);
366 nv_wr32(priv, 0x40601c, 0xc0000000);
367 nv_wr32(priv, 0x400108, 0x00000040);
368 trap &= ~0x00000040;
369 }
370
371 if (trap & 0x00000080) {
372 u32 stat = nv_rd32(priv, 0x404490);
373 nv_error(priv, "MACRO 0x%08x\n", stat);
374 nv_wr32(priv, 0x404490, 0xc0000000);
375 nv_wr32(priv, 0x400108, 0x00000080);
376 trap &= ~0x00000080;
377 }
378
379 if (trap & 0x01000000) {
380 u32 stat = nv_rd32(priv, 0x400118);
381 for (gpc = 0; stat && gpc < priv->gpc_nr; gpc++) {
382 u32 mask = 0x00000001 << gpc;
383 if (stat & mask) {
384 nvc0_graph_trap_gpc(priv, gpc);
385 nv_wr32(priv, 0x400118, mask);
386 stat &= ~mask;
387 }
388 }
389 nv_wr32(priv, 0x400108, 0x01000000);
390 trap &= ~0x01000000;
391 }
392
393 if (trap & 0x02000000) {
394 for (rop = 0; rop < priv->rop_nr; rop++) {
395 u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
396 u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
397 nv_error(priv, "ROP%d 0x%08x 0x%08x\n",
398 rop, statz, statc);
399 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
400 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
401 }
402 nv_wr32(priv, 0x400108, 0x02000000);
403 trap &= ~0x02000000;
404 }
405
406 if (trap) {
407 nv_error(priv, "TRAP UNHANDLED 0x%08x\n", trap);
408 nv_wr32(priv, 0x400108, trap);
409 }
410}
411
412static void
413nvc0_graph_intr(struct nouveau_subdev *subdev)
414{
415 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
416 struct nouveau_engine *engine = nv_engine(subdev);
417 struct nouveau_object *engctx;
418 struct nouveau_handle *handle;
419 struct nvc0_graph_priv *priv = (void *)subdev;
420 u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff;
421 u32 stat = nv_rd32(priv, 0x400100);
422 u32 addr = nv_rd32(priv, 0x400704);
423 u32 mthd = (addr & 0x00003ffc);
424 u32 subc = (addr & 0x00070000) >> 16;
425 u32 data = nv_rd32(priv, 0x400708);
426 u32 code = nv_rd32(priv, 0x400110);
427 u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
428 int chid;
429
430 engctx = nouveau_engctx_get(engine, inst);
431 chid = pfifo->chid(pfifo, engctx);
432
433 if (stat & 0x00000010) {
434 handle = nouveau_handle_get_class(engctx, class);
435 if (!handle || nv_call(handle->object, mthd, data)) {
436 nv_error(priv, "ILLEGAL_MTHD ch %d [0x%010llx] "
437 "subc %d class 0x%04x mthd 0x%04x "
438 "data 0x%08x\n",
439 chid, inst << 12, subc, class, mthd, data);
440 }
441 nouveau_handle_put(handle);
442 nv_wr32(priv, 0x400100, 0x00000010);
443 stat &= ~0x00000010;
444 }
445
446 if (stat & 0x00000020) {
447 nv_error(priv, "ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
448 "class 0x%04x mthd 0x%04x data 0x%08x\n",
449 chid, inst << 12, subc, class, mthd, data);
450 nv_wr32(priv, 0x400100, 0x00000020);
451 stat &= ~0x00000020;
452 }
453
454 if (stat & 0x00100000) {
455 nv_error(priv, "DATA_ERROR [");
456 nouveau_enum_print(nv50_data_error_names, code);
457 printk("] ch %d [0x%010llx] subc %d class 0x%04x "
458 "mthd 0x%04x data 0x%08x\n",
459 chid, inst << 12, subc, class, mthd, data);
460 nv_wr32(priv, 0x400100, 0x00100000);
461 stat &= ~0x00100000;
462 }
463
464 if (stat & 0x00200000) {
465 nv_error(priv, "TRAP ch %d [0x%010llx]\n", chid, inst << 12);
466 nvc0_graph_trap_intr(priv);
467 nv_wr32(priv, 0x400100, 0x00200000);
468 stat &= ~0x00200000;
469 }
470
471 if (stat & 0x00080000) {
472 nvc0_graph_ctxctl_isr(priv);
473 nv_wr32(priv, 0x400100, 0x00080000);
474 stat &= ~0x00080000;
475 }
476
477 if (stat) {
478 nv_error(priv, "unknown stat 0x%08x\n", stat);
479 nv_wr32(priv, 0x400100, stat);
480 }
481
482 nv_wr32(priv, 0x400500, 0x00010001);
483 nouveau_engctx_put(engctx);
484}
485
486int
487nvc0_graph_ctor_fw(struct nvc0_graph_priv *priv, const char *fwname,
488 struct nvc0_graph_fuc *fuc)
489{
490 struct nouveau_device *device = nv_device(priv);
491 const struct firmware *fw;
492 char f[32];
493 int ret;
494
495 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
496 ret = request_firmware(&fw, f, &device->pdev->dev);
497 if (ret) {
498 snprintf(f, sizeof(f), "nouveau/%s", fwname);
499 ret = request_firmware(&fw, f, &device->pdev->dev);
500 if (ret) {
501 nv_error(priv, "failed to load %s\n", fwname);
502 return ret;
503 }
504 }
505
506 fuc->size = fw->size;
507 fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
508 release_firmware(fw);
509 return (fuc->data != NULL) ? 0 : -ENOMEM;
510}
511
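/* The two-step lookup above first tries a chipset-qualified name and
 * then a generic fallback: for chipset 0xc0 and fwname "fuc409c" it
 * requests "nouveau/nvc0_fuc409c" and, failing that, "nouveau/fuc409c".
 */
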
512static int
513nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
514 struct nouveau_oclass *oclass, void *data, u32 size,
515 struct nouveau_object **pobject)
516{
517 struct nouveau_device *device = nv_device(parent);
518 struct nvc0_graph_priv *priv;
519 bool enable = true;
520 int ret, i;
521
522 switch (device->chipset) {
523 case 0xd9: /* known broken without binary driver firmware */
524 enable = false;
525 break;
526 default:
527 break;
528 }
529
530 ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
531 *pobject = nv_object(priv);
532 if (ret)
533 return ret;
534
535 nv_subdev(priv)->unit = 0x18001000;
536 nv_subdev(priv)->intr = nvc0_graph_intr;
537 nv_engine(priv)->cclass = &nvc0_graph_cclass;
538
539 if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
540 nv_info(priv, "using external firmware\n");
541 if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
542 nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
543 nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
544 nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
545 return -EINVAL;
546 priv->firmware = true;
547 }
548
549 switch (nvc0_graph_class(priv)) {
550 case 0x9097:
551 nv_engine(priv)->sclass = nvc0_graph_sclass;
552 break;
553 case 0x9197:
554 nv_engine(priv)->sclass = nvc1_graph_sclass;
555 break;
556 case 0x9297:
557 nv_engine(priv)->sclass = nvc8_graph_sclass;
558 break;
559 }
560
561 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
562 if (ret)
563 return ret;
564
565 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
566 if (ret)
567 return ret;
568
569 for (i = 0; i < 0x1000; i += 4) {
570 nv_wo32(priv->unk4188b4, i, 0x00000010);
571 nv_wo32(priv->unk4188b8, i, 0x00000010);
572 }
573
574 priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
575 priv->gpc_nr = nv_rd32(priv, 0x409604) & 0x0000001f;
576 for (i = 0; i < priv->gpc_nr; i++) {
577 priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
578 priv->tpc_total += priv->tpc_nr[i];
579 }
580
581 /*XXX: these need figuring out... though it might not even matter */
582 switch (nv_device(priv)->chipset) {
583 case 0xc0:
584 if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
585 priv->magic_not_rop_nr = 0x07;
586 } else
587 if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
588 priv->magic_not_rop_nr = 0x05;
589 } else
590 if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
591 priv->magic_not_rop_nr = 0x06;
592 }
593 break;
594 case 0xc3: /* 450, 4/0/0/0, 2 */
595 priv->magic_not_rop_nr = 0x03;
596 break;
597 case 0xc4: /* 460, 3/4/0/0, 4 */
598 priv->magic_not_rop_nr = 0x01;
599 break;
600 case 0xc1: /* 2/0/0/0, 1 */
601 priv->magic_not_rop_nr = 0x01;
602 break;
603 case 0xc8: /* 4/4/3/4, 5 */
604 priv->magic_not_rop_nr = 0x06;
605 break;
606 case 0xce: /* 4/4/0/0, 4 */
607 priv->magic_not_rop_nr = 0x03;
608 break;
609 case 0xcf: /* 4/0/0/0, 3 */
610 priv->magic_not_rop_nr = 0x03;
611 break;
612 case 0xd9: /* 1/0/0/0, 1 */
613 priv->magic_not_rop_nr = 0x01;
614 break;
615 }
616
617 return 0;
618}
619
620static void
621nvc0_graph_dtor_fw(struct nvc0_graph_fuc *fuc)
622{
623	kfree(fuc->data);	/* kfree(NULL) is safe */
624	fuc->data = NULL;
627}
628
629void
630nvc0_graph_dtor(struct nouveau_object *object)
631{
632 struct nvc0_graph_priv *priv = (void *)object;
633
634	kfree(priv->data);	/* kfree(NULL) is safe */
636
637 nvc0_graph_dtor_fw(&priv->fuc409c);
638 nvc0_graph_dtor_fw(&priv->fuc409d);
639 nvc0_graph_dtor_fw(&priv->fuc41ac);
640 nvc0_graph_dtor_fw(&priv->fuc41ad);
641
642 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
643 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
644
645 nouveau_graph_destroy(&priv->base);
646}
647
648static void
649nvc0_graph_init_obj418880(struct nvc0_graph_priv *priv)
650{
651 int i;
652
653 nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
654 nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
655 for (i = 0; i < 4; i++)
656 nv_wr32(priv, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
657 nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
658 nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
659}
660
661static void
662nvc0_graph_init_regs(struct nvc0_graph_priv *priv)
663{
664 nv_wr32(priv, 0x400080, 0x003083c2);
665 nv_wr32(priv, 0x400088, 0x00006fe7);
666 nv_wr32(priv, 0x40008c, 0x00000000);
667 nv_wr32(priv, 0x400090, 0x00000030);
668 nv_wr32(priv, 0x40013c, 0x013901f7);
669 nv_wr32(priv, 0x400140, 0x00000100);
670 nv_wr32(priv, 0x400144, 0x00000000);
671 nv_wr32(priv, 0x400148, 0x00000110);
672 nv_wr32(priv, 0x400138, 0x00000000);
673 nv_wr32(priv, 0x400130, 0x00000000);
674 nv_wr32(priv, 0x400134, 0x00000000);
675 nv_wr32(priv, 0x400124, 0x00000002);
676}
677
678static void
679nvc0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
680{
681 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
682 u32 data[TPC_MAX / 8];
683 u8 tpcnr[GPC_MAX];
684 int i, gpc, tpc;
685
686 nv_wr32(priv, TPC_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
687
688 /*
689 * TP ROP UNKVAL(magic_not_rop_nr)
690 * 450: 4/0/0/0 2 3
691 * 460: 3/4/0/0 4 1
692 * 465: 3/4/4/0 4 7
693 * 470: 3/3/4/4 5 5
694 * 480: 3/4/4/4 6 6
695 */
696
697 memset(data, 0x00, sizeof(data));
698 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
699 for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
700 do {
701 gpc = (gpc + 1) % priv->gpc_nr;
702 } while (!tpcnr[gpc]);
703 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
704
705 data[i / 8] |= tpc << ((i % 8) * 4);
706 }
707
708 nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
709 nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
710 nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
711 nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
712
713 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
714 nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
715 priv->tpc_nr[gpc]);
716 nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
717 nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
718 }
719
720 nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
721 nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
722}
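
/* Worked example for the 470 layout from the table above (gpc_nr = 4,
 * tpc_nr = 3/3/4/4, tpc_total = 14): the round-robin walk hands global
 * TPC slots to GPC0,1,2,3,0,1,2,3,... and packs each GPC-local TPC index
 * into a 4-bit nibble of data[], so the first eight slots give
 * data[0] = 0x11110000 (local indices 0,0,0,0,1,1,1,1).  magicgpc918
 * then works out to DIV_ROUND_UP(0x00800000, 14) = 0x92493.
 */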
723
724static void
725nvc0_graph_init_units(struct nvc0_graph_priv *priv)
726{
727 nv_wr32(priv, 0x409c24, 0x000f0000);
728 nv_wr32(priv, 0x404000, 0xc0000000); /* DISPATCH */
729 nv_wr32(priv, 0x404600, 0xc0000000); /* M2MF */
730 nv_wr32(priv, 0x408030, 0xc0000000);
731 nv_wr32(priv, 0x40601c, 0xc0000000);
732 nv_wr32(priv, 0x404490, 0xc0000000); /* MACRO */
733 nv_wr32(priv, 0x406018, 0xc0000000);
734 nv_wr32(priv, 0x405840, 0xc0000000);
735 nv_wr32(priv, 0x405844, 0x00ffffff);
736 nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
737 nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
738}
739
740static void
741nvc0_graph_init_gpc_1(struct nvc0_graph_priv *priv)
742{
743 int gpc, tpc;
744
745 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
746 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
747 nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
748 nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
749 nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
750 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
751 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
752 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
753 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
754 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
755 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
756 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
757 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
758 }
759 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
760 nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
761 }
762}
763
764static void
765nvc0_graph_init_rop(struct nvc0_graph_priv *priv)
766{
767 int rop;
768
769 for (rop = 0; rop < priv->rop_nr; rop++) {
770 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
771 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
772 nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
773 nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
774 }
775}
776
777void
778nvc0_graph_init_fw(struct nvc0_graph_priv *priv, u32 fuc_base,
779 struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
780{
781 int i;
782
783 nv_wr32(priv, fuc_base + 0x01c0, 0x01000000);
784 for (i = 0; i < data->size / 4; i++)
785 nv_wr32(priv, fuc_base + 0x01c4, data->data[i]);
786
787 nv_wr32(priv, fuc_base + 0x0180, 0x01000000);
788 for (i = 0; i < code->size / 4; i++) {
789 if ((i & 0x3f) == 0)
790 nv_wr32(priv, fuc_base + 0x0188, i >> 6);
791 nv_wr32(priv, fuc_base + 0x0184, code->data[i]);
792 }
793}
794
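/* The code upload above pages the falcon's instruction store in 256-byte
 * blocks: 0x0180 selects auto-incrementing upload, each write to 0x0184
 * transfers one code word, and every 64 words ((i & 0x3f) == 0) the
 * block index i >> 6 goes to 0x0188 to open the next page.  The data
 * segment at 0x01c0/0x01c4 streams without paging.
 */
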
795static int
796nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
797{
798 u32 r000260;
799 int i;
800
801 if (priv->firmware) {
802 /* load fuc microcode */
803 r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
804 nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c,
805 &priv->fuc409d);
806 nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac,
807 &priv->fuc41ad);
808 nv_wr32(priv, 0x000260, r000260);
809
810 /* start both of them running */
811 nv_wr32(priv, 0x409840, 0xffffffff);
812 nv_wr32(priv, 0x41a10c, 0x00000000);
813 nv_wr32(priv, 0x40910c, 0x00000000);
814 nv_wr32(priv, 0x41a100, 0x00000002);
815 nv_wr32(priv, 0x409100, 0x00000002);
816 if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
817			nv_error(priv, "0x409800 wait failed\n");
818
819 nv_wr32(priv, 0x409840, 0xffffffff);
820 nv_wr32(priv, 0x409500, 0x7fffffff);
821 nv_wr32(priv, 0x409504, 0x00000021);
822
823 nv_wr32(priv, 0x409840, 0xffffffff);
824 nv_wr32(priv, 0x409500, 0x00000000);
825 nv_wr32(priv, 0x409504, 0x00000010);
826 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
827 nv_error(priv, "fuc09 req 0x10 timeout\n");
828 return -EBUSY;
829 }
830 priv->size = nv_rd32(priv, 0x409800);
831
832 nv_wr32(priv, 0x409840, 0xffffffff);
833 nv_wr32(priv, 0x409500, 0x00000000);
834 nv_wr32(priv, 0x409504, 0x00000016);
835 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
836 nv_error(priv, "fuc09 req 0x16 timeout\n");
837 return -EBUSY;
838 }
839
840 nv_wr32(priv, 0x409840, 0xffffffff);
841 nv_wr32(priv, 0x409500, 0x00000000);
842 nv_wr32(priv, 0x409504, 0x00000025);
843 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
844 nv_error(priv, "fuc09 req 0x25 timeout\n");
845 return -EBUSY;
846 }
847
848 if (priv->data == NULL) {
849 int ret = nvc0_grctx_generate(priv);
850 if (ret) {
851 nv_error(priv, "failed to construct context\n");
852 return ret;
853 }
854 }
855
856 return 0;
857 }
858
859 /* load HUB microcode */
860 r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
861 nv_wr32(priv, 0x4091c0, 0x01000000);
862 for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
863 nv_wr32(priv, 0x4091c4, nvc0_grhub_data[i]);
864
865 nv_wr32(priv, 0x409180, 0x01000000);
866 for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
867 if ((i & 0x3f) == 0)
868 nv_wr32(priv, 0x409188, i >> 6);
869 nv_wr32(priv, 0x409184, nvc0_grhub_code[i]);
870 }
871
872 /* load GPC microcode */
873 nv_wr32(priv, 0x41a1c0, 0x01000000);
874 for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
875 nv_wr32(priv, 0x41a1c4, nvc0_grgpc_data[i]);
876
877 nv_wr32(priv, 0x41a180, 0x01000000);
878 for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
879 if ((i & 0x3f) == 0)
880 nv_wr32(priv, 0x41a188, i >> 6);
881 nv_wr32(priv, 0x41a184, nvc0_grgpc_code[i]);
882 }
883 nv_wr32(priv, 0x000260, r000260);
884
885 /* start HUB ucode running, it'll init the GPCs */
886 nv_wr32(priv, 0x409800, nv_device(priv)->chipset);
887 nv_wr32(priv, 0x40910c, 0x00000000);
888 nv_wr32(priv, 0x409100, 0x00000002);
889 if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
890 nv_error(priv, "HUB_INIT timed out\n");
891 nvc0_graph_ctxctl_debug(priv);
892 return -EBUSY;
893 }
894
895 priv->size = nv_rd32(priv, 0x409804);
896 if (priv->data == NULL) {
897 int ret = nvc0_grctx_generate(priv);
898 if (ret) {
899 nv_error(priv, "failed to construct context\n");
900 return ret;
901 }
902 }
903
904 return 0;
905}
906
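/* The firmware path above repeats one handshake: arm the 0x409840 mask,
 * place the argument in 0x409500 and the request code in 0x409504, then
 * poll 0x409800 for a reply.  A sketch of that pattern (the helper name
 * is illustrative only):
 *
 *	static int
 *	nvc0_fuc09_req(struct nvc0_graph_priv *priv, u32 req, u32 arg)
 *	{
 *		nv_wr32(priv, 0x409840, 0xffffffff);
 *		nv_wr32(priv, 0x409500, arg);
 *		nv_wr32(priv, 0x409504, req);
 *		if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000))
 *			return -EBUSY;
 *		return 0;
 *	}
 *
 * The 0x10/0x16/0x25 requests above then collapse to three calls, with
 * the 0x10 caller additionally reading the context size from 0x409800.
 */
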
907static int
908nvc0_graph_init(struct nouveau_object *object)
909{
910 struct nvc0_graph_priv *priv = (void *)object;
911 int ret;
912
913 ret = nouveau_graph_init(&priv->base);
914 if (ret)
915 return ret;
916
917 nvc0_graph_init_obj418880(priv);
918 nvc0_graph_init_regs(priv);
919	/*nvc0_graph_init_unimplemented_magics(priv);*/
920 nvc0_graph_init_gpc_0(priv);
921	/*nvc0_graph_init_unimplemented_c242(priv);*/
922
923 nv_wr32(priv, 0x400500, 0x00010001);
924 nv_wr32(priv, 0x400100, 0xffffffff);
925 nv_wr32(priv, 0x40013c, 0xffffffff);
926
927 nvc0_graph_init_units(priv);
928 nvc0_graph_init_gpc_1(priv);
929 nvc0_graph_init_rop(priv);
930
931 nv_wr32(priv, 0x400108, 0xffffffff);
932 nv_wr32(priv, 0x400138, 0xffffffff);
933 nv_wr32(priv, 0x400118, 0xffffffff);
934 nv_wr32(priv, 0x400130, 0xffffffff);
935 nv_wr32(priv, 0x40011c, 0xffffffff);
936 nv_wr32(priv, 0x400134, 0xffffffff);
937 nv_wr32(priv, 0x400054, 0x34ce3464);
938
939 ret = nvc0_graph_init_ctxctl(priv);
940 if (ret)
941 return ret;
942
943 return 0;
944}
945
946struct nouveau_oclass
947nvc0_graph_oclass = {
948 .handle = NV_ENGINE(GR, 0xc0),
949 .ofuncs = &(struct nouveau_ofuncs) {
950 .ctor = nvc0_graph_ctor,
951 .dtor = nvc0_graph_dtor,
952 .init = nvc0_graph_init,
953 .fini = _nouveau_graph_fini,
954 },
955};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
new file mode 100644
index 000000000000..18d2210e12eb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -0,0 +1,171 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NVC0_GRAPH_H__
26#define __NVC0_GRAPH_H__
27
28#include <core/client.h>
29#include <core/handle.h>
30#include <core/gpuobj.h>
31#include <core/option.h>
32
33#include <subdev/fb.h>
34#include <subdev/vm.h>
35#include <subdev/bar.h>
36#include <subdev/timer.h>
37
38#include <engine/fifo.h>
39#include <engine/graph.h>
40
41#define GPC_MAX 4
42#define TPC_MAX 32
43
44#define ROP_BCAST(r) (0x408800 + (r))
45#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
46#define GPC_BCAST(r) (0x418000 + (r))
47#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
48#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
49
50struct nvc0_graph_data {
51 u32 size;
52 u32 align;
53 u32 access;
54};
55
56struct nvc0_graph_mmio {
57 u32 addr;
58 u32 data;
59 u32 shift;
60 u32 buffer;
61};
62
63struct nvc0_graph_fuc {
64 u32 *data;
65 u32 size;
66};
67
68struct nvc0_graph_priv {
69 struct nouveau_graph base;
70
71 struct nvc0_graph_fuc fuc409c;
72 struct nvc0_graph_fuc fuc409d;
73 struct nvc0_graph_fuc fuc41ac;
74 struct nvc0_graph_fuc fuc41ad;
75 bool firmware;
76
77 u8 rop_nr;
78 u8 gpc_nr;
79 u8 tpc_nr[GPC_MAX];
80 u8 tpc_total;
81
82 struct nouveau_gpuobj *unk4188b4;
83 struct nouveau_gpuobj *unk4188b8;
84
85 struct nvc0_graph_data mmio_data[4];
86 struct nvc0_graph_mmio mmio_list[4096/8];
87 u32 size;
88 u32 *data;
89
90 u8 magic_not_rop_nr;
91};
92
93struct nvc0_graph_chan {
94 struct nouveau_graph_chan base;
95
96 struct nouveau_gpuobj *mmio;
97 struct nouveau_vma mmio_vma;
98 int mmio_nr;
99 struct {
100 struct nouveau_gpuobj *mem;
101 struct nouveau_vma vma;
102 } data[4];
103};
104
105static inline u32
106nvc0_graph_class(void *obj)
107{
108 struct nouveau_device *device = nv_device(obj);
109
110 switch (device->chipset) {
111 case 0xc0:
112 case 0xc3:
113 case 0xc4:
114 case 0xce: /* guess, mmio trace shows only 0x9097 state */
115 case 0xcf: /* guess, mmio trace shows only 0x9097 state */
116 return 0x9097;
117 case 0xc1:
118 return 0x9197;
119 case 0xc8:
120 case 0xd9:
121 return 0x9297;
122 case 0xe4:
123 case 0xe7:
124 return 0xa097;
125 default:
126 return 0;
127 }
128}
129
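/* The value returned is the newest 3D class the chipset exposes; the
 * nvc0 ctor switches on it to pick an sclass table, and 0 (unknown
 * chipset) leaves the engine without one.
 */
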
130void nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data);
131
132static inline void
133nv_mthd(struct nvc0_graph_priv *priv, u32 class, u32 mthd, u32 data)
134{
135 nv_wr32(priv, 0x40448c, data);
136 nv_wr32(priv, 0x404488, 0x80000000 | (mthd << 14) | class);
137}
138
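/* Example: nv_mthd(priv, 0x9097, 0x0200, 0) writes 0x40448c = 0 and
 * 0x404488 = 0x80000000 | (0x0200 << 14) | 0x9097 = 0x80809097, i.e. a
 * single software-injected method with bit 31 presumably acting as the
 * trigger/busy flag.
 */
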
139struct nvc0_grctx {
140 struct nvc0_graph_priv *priv;
141 struct nvc0_graph_data *data;
142 struct nvc0_graph_mmio *mmio;
143 struct nouveau_gpuobj *chan;
144 int buffer_nr;
145 u64 buffer[4];
146 u64 addr;
147};
148
149int nvc0_grctx_generate(struct nvc0_graph_priv *);
150int nvc0_grctx_init(struct nvc0_graph_priv *, struct nvc0_grctx *);
151void nvc0_grctx_data(struct nvc0_grctx *, u32, u32, u32);
152void nvc0_grctx_mmio(struct nvc0_grctx *, u32, u32, u32, u32);
153int nvc0_grctx_fini(struct nvc0_grctx *);
154
155int nve0_grctx_generate(struct nvc0_graph_priv *);
156
157#define mmio_data(s,a,p) nvc0_grctx_data(&info, (s), (a), (p))
158#define mmio_list(r,d,s,b) nvc0_grctx_mmio(&info, (r), (d), (s), (b))
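
/* Both macros expect a local "struct nvc0_grctx info" in the calling
 * function; nvc0_grctx_data()/nvc0_grctx_mmio() record the requested
 * buffer and register entries through it.
 */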
159
160void nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *);
161int nvc0_graph_ctor_fw(struct nvc0_graph_priv *, const char *,
162 struct nvc0_graph_fuc *);
163void nvc0_graph_dtor(struct nouveau_object *);
164void nvc0_graph_init_fw(struct nvc0_graph_priv *, u32 base,
165 struct nvc0_graph_fuc *, struct nvc0_graph_fuc *);
166int nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *,
167 struct nouveau_oclass *, void *, u32,
168 struct nouveau_object **);
169void nvc0_graph_context_dtor(struct nouveau_object *);
170
171#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
new file mode 100644
index 000000000000..539d4c72f192
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -0,0 +1,576 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nvc0.h"
26#include "fuc/hubnve0.fuc.h"
27#include "fuc/gpcnve0.fuc.h"
28
29/*******************************************************************************
30 * Graphics object classes
31 ******************************************************************************/
32
33static struct nouveau_oclass
34nve0_graph_sclass[] = {
35 { 0x902d, &nouveau_object_ofuncs },
36 { 0xa040, &nouveau_object_ofuncs },
37 { 0xa097, &nouveau_object_ofuncs },
38 { 0xa0c0, &nouveau_object_ofuncs },
39 { 0xa0b5, &nouveau_object_ofuncs },
40 {}
41};
42
43/*******************************************************************************
44 * PGRAPH context
45 ******************************************************************************/
46
47static struct nouveau_oclass
48nve0_graph_cclass = {
49 .handle = NV_ENGCTX(GR, 0xe0),
50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = nvc0_graph_context_ctor,
52 .dtor = nvc0_graph_context_dtor,
53 .init = _nouveau_graph_context_init,
54 .fini = _nouveau_graph_context_fini,
55 .rd32 = _nouveau_graph_context_rd32,
56 .wr32 = _nouveau_graph_context_wr32,
57 },
58};
59
60/*******************************************************************************
61 * PGRAPH engine/subdev functions
62 ******************************************************************************/
63
64static void
65nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
66{
67 u32 ustat = nv_rd32(priv, 0x409c18);
68
69 if (ustat & 0x00000001)
70 nv_error(priv, "CTXCTRL ucode error\n");
71 if (ustat & 0x00080000)
72 nv_error(priv, "CTXCTRL watchdog timeout\n");
73 if (ustat & ~0x00080001)
74 nv_error(priv, "CTXCTRL 0x%08x\n", ustat);
75
76 nvc0_graph_ctxctl_debug(priv);
77 nv_wr32(priv, 0x409c20, ustat);
78}
79
80static void
81nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
82{
83 u32 trap = nv_rd32(priv, 0x400108);
84 int rop;
85
86 if (trap & 0x00000001) {
87 u32 stat = nv_rd32(priv, 0x404000);
88 nv_error(priv, "DISPATCH ch %d [0x%010llx] 0x%08x\n",
89 chid, inst, stat);
90 nv_wr32(priv, 0x404000, 0xc0000000);
91 nv_wr32(priv, 0x400108, 0x00000001);
92 trap &= ~0x00000001;
93 }
94
95 if (trap & 0x00000010) {
96 u32 stat = nv_rd32(priv, 0x405840);
97 nv_error(priv, "SHADER ch %d [0x%010llx] 0x%08x\n",
98 chid, inst, stat);
99 nv_wr32(priv, 0x405840, 0xc0000000);
100 nv_wr32(priv, 0x400108, 0x00000010);
101 trap &= ~0x00000010;
102 }
103
104 if (trap & 0x02000000) {
105 for (rop = 0; rop < priv->rop_nr; rop++) {
106 u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
107 u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
108 nv_error(priv, "ROP%d ch %d [0x%010llx] 0x%08x 0x%08x\n",
109 rop, chid, inst, statz, statc);
110 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
111 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
112 }
113 nv_wr32(priv, 0x400108, 0x02000000);
114 trap &= ~0x02000000;
115 }
116
117 if (trap) {
118 nv_error(priv, "TRAP ch %d [0x%010llx] 0x%08x\n",
119 chid, inst, trap);
120 nv_wr32(priv, 0x400108, trap);
121 }
122}
123
124static void
125nve0_graph_intr(struct nouveau_subdev *subdev)
126{
127 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
128 struct nouveau_engine *engine = nv_engine(subdev);
129 struct nouveau_object *engctx;
130 struct nouveau_handle *handle;
131 struct nvc0_graph_priv *priv = (void *)subdev;
132 u64 inst = nv_rd32(priv, 0x409b00) & 0x0fffffff;
133 u32 stat = nv_rd32(priv, 0x400100);
134 u32 addr = nv_rd32(priv, 0x400704);
135 u32 mthd = (addr & 0x00003ffc);
136 u32 subc = (addr & 0x00070000) >> 16;
137 u32 data = nv_rd32(priv, 0x400708);
138 u32 code = nv_rd32(priv, 0x400110);
139 u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
140 int chid;
141
142 engctx = nouveau_engctx_get(engine, inst);
143 chid = pfifo->chid(pfifo, engctx);
144
145 if (stat & 0x00000010) {
146 handle = nouveau_handle_get_class(engctx, class);
147 if (!handle || nv_call(handle->object, mthd, data)) {
148 nv_error(priv, "ILLEGAL_MTHD ch %d [0x%010llx] "
149 "subc %d class 0x%04x mthd 0x%04x "
150 "data 0x%08x\n",
151 chid, inst, subc, class, mthd, data);
152 }
153 nouveau_handle_put(handle);
154 nv_wr32(priv, 0x400100, 0x00000010);
155 stat &= ~0x00000010;
156 }
157
158 if (stat & 0x00000020) {
159 nv_error(priv, "ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
160 "class 0x%04x mthd 0x%04x data 0x%08x\n",
161 chid, inst, subc, class, mthd, data);
162 nv_wr32(priv, 0x400100, 0x00000020);
163 stat &= ~0x00000020;
164 }
165
166 if (stat & 0x00100000) {
167 nv_error(priv, "DATA_ERROR [");
168 nouveau_enum_print(nv50_data_error_names, code);
169 printk("] ch %d [0x%010llx] subc %d class 0x%04x "
170 "mthd 0x%04x data 0x%08x\n",
171 chid, inst, subc, class, mthd, data);
172 nv_wr32(priv, 0x400100, 0x00100000);
173 stat &= ~0x00100000;
174 }
175
176 if (stat & 0x00200000) {
177 nve0_graph_trap_isr(priv, chid, inst);
178 nv_wr32(priv, 0x400100, 0x00200000);
179 stat &= ~0x00200000;
180 }
181
182 if (stat & 0x00080000) {
183 nve0_graph_ctxctl_isr(priv);
184 nv_wr32(priv, 0x400100, 0x00080000);
185 stat &= ~0x00080000;
186 }
187
188 if (stat) {
189 nv_error(priv, "unknown stat 0x%08x\n", stat);
190 nv_wr32(priv, 0x400100, stat);
191 }
192
193 nv_wr32(priv, 0x400500, 0x00010001);
194 nouveau_engctx_put(engctx);
195}
196
197static int
198nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
199 struct nouveau_oclass *oclass, void *data, u32 size,
200 struct nouveau_object **pobject)
201{
202 struct nouveau_device *device = nv_device(parent);
203 struct nvc0_graph_priv *priv;
204 int ret, i;
205
206 ret = nouveau_graph_create(parent, engine, oclass, false, &priv);
207 *pobject = nv_object(priv);
208 if (ret)
209 return ret;
210
211 nv_subdev(priv)->unit = 0x18001000;
212 nv_subdev(priv)->intr = nve0_graph_intr;
213 nv_engine(priv)->cclass = &nve0_graph_cclass;
214 nv_engine(priv)->sclass = nve0_graph_sclass;
215
216 if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
217 nv_info(priv, "using external firmware\n");
218 if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
219 nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
220 nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
221 nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
222 return -EINVAL;
223 priv->firmware = true;
224 }
225
226 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
227 if (ret)
228 return ret;
229
230 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
231 if (ret)
232 return ret;
233
234 for (i = 0; i < 0x1000; i += 4) {
235 nv_wo32(priv->unk4188b4, i, 0x00000010);
236 nv_wo32(priv->unk4188b8, i, 0x00000010);
237 }
238
239 priv->gpc_nr = nv_rd32(priv, 0x409604) & 0x0000001f;
240 priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
241 for (i = 0; i < priv->gpc_nr; i++) {
242 priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
243 priv->tpc_total += priv->tpc_nr[i];
244 }
245
246 switch (nv_device(priv)->chipset) {
247 case 0xe4:
248 if (priv->tpc_total == 8)
249 priv->magic_not_rop_nr = 3;
250 else
251 if (priv->tpc_total == 7)
252 priv->magic_not_rop_nr = 1;
253 break;
254 case 0xe7:
255 priv->magic_not_rop_nr = 1;
256 break;
257 default:
258 break;
259 }
260
261 return 0;
262}
263
264static void
265nve0_graph_init_obj418880(struct nvc0_graph_priv *priv)
266{
267 int i;
268
269 nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
270 nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
271 for (i = 0; i < 4; i++)
272 nv_wr32(priv, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
273 nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
274 nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
275}
276
277static void
278nve0_graph_init_regs(struct nvc0_graph_priv *priv)
279{
280 nv_wr32(priv, 0x400080, 0x003083c2);
281 nv_wr32(priv, 0x400088, 0x0001ffe7);
282 nv_wr32(priv, 0x40008c, 0x00000000);
283 nv_wr32(priv, 0x400090, 0x00000030);
284 nv_wr32(priv, 0x40013c, 0x003901f7);
285 nv_wr32(priv, 0x400140, 0x00000100);
286 nv_wr32(priv, 0x400144, 0x00000000);
287 nv_wr32(priv, 0x400148, 0x00000110);
288 nv_wr32(priv, 0x400138, 0x00000000);
289 nv_wr32(priv, 0x400130, 0x00000000);
290 nv_wr32(priv, 0x400134, 0x00000000);
291 nv_wr32(priv, 0x400124, 0x00000002);
292}
293
294static void
295nve0_graph_init_units(struct nvc0_graph_priv *priv)
296{
297 nv_wr32(priv, 0x409ffc, 0x00000000);
298 nv_wr32(priv, 0x409c14, 0x00003e3e);
299 nv_wr32(priv, 0x409c24, 0x000f0000);
300
301 nv_wr32(priv, 0x404000, 0xc0000000);
302 nv_wr32(priv, 0x404600, 0xc0000000);
303 nv_wr32(priv, 0x408030, 0xc0000000);
304 nv_wr32(priv, 0x404490, 0xc0000000);
305 nv_wr32(priv, 0x406018, 0xc0000000);
306 nv_wr32(priv, 0x407020, 0xc0000000);
307 nv_wr32(priv, 0x405840, 0xc0000000);
308 nv_wr32(priv, 0x405844, 0x00ffffff);
309
310 nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
311 nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
313}
314
315static void
316nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
317{
318 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
319 u32 data[TPC_MAX / 8];
320 u8 tpcnr[GPC_MAX];
321 int i, gpc, tpc;
322
323 nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
324
325 memset(data, 0x00, sizeof(data));
326 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
327 for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
328 do {
329 gpc = (gpc + 1) % priv->gpc_nr;
330 } while (!tpcnr[gpc]);
331 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
332
333 data[i / 8] |= tpc << ((i % 8) * 4);
334 }
335
336 nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
337 nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
338 nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
339 nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
340
341 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
342 nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
343 priv->tpc_nr[gpc]);
344 nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
345 nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
346 }
347
348 nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
349 nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
350}
351
352static void
353nve0_graph_init_gpc_1(struct nvc0_graph_priv *priv)
354{
355 int gpc, tpc;
356
357 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
358 nv_wr32(priv, GPC_UNIT(gpc, 0x3038), 0xc0000000);
359 nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
360 nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
361 nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
362 nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
363 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
364 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
365 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
366 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
367 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
368 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
369 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
370 nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
371 }
372 nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
373 nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
374 }
375}
376
377static void
378nve0_graph_init_rop(struct nvc0_graph_priv *priv)
379{
380 int rop;
381
382 for (rop = 0; rop < priv->rop_nr; rop++) {
383 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
384 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
385 nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
386 nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
387 }
388}
389
390static int
391nve0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
392{
393 u32 r000260;
394 int i;
395
396 if (priv->firmware) {
397 /* load fuc microcode */
398 r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
399 nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c, &priv->fuc409d);
400 nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
401 nv_wr32(priv, 0x000260, r000260);
402
403 /* start both of them running */
404 nv_wr32(priv, 0x409840, 0xffffffff);
405 nv_wr32(priv, 0x41a10c, 0x00000000);
406 nv_wr32(priv, 0x40910c, 0x00000000);
407 nv_wr32(priv, 0x41a100, 0x00000002);
408 nv_wr32(priv, 0x409100, 0x00000002);
409 if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
410 nv_error(priv, "0x409800 wait failed\n");
411
412 nv_wr32(priv, 0x409840, 0xffffffff);
413 nv_wr32(priv, 0x409500, 0x7fffffff);
414 nv_wr32(priv, 0x409504, 0x00000021);
415
416 nv_wr32(priv, 0x409840, 0xffffffff);
417 nv_wr32(priv, 0x409500, 0x00000000);
418 nv_wr32(priv, 0x409504, 0x00000010);
419 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
420 nv_error(priv, "fuc09 req 0x10 timeout\n");
421 return -EBUSY;
422 }
423 priv->size = nv_rd32(priv, 0x409800);
424
425 nv_wr32(priv, 0x409840, 0xffffffff);
426 nv_wr32(priv, 0x409500, 0x00000000);
427 nv_wr32(priv, 0x409504, 0x00000016);
428 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
429 nv_error(priv, "fuc09 req 0x16 timeout\n");
430 return -EBUSY;
431 }
432
433 nv_wr32(priv, 0x409840, 0xffffffff);
434 nv_wr32(priv, 0x409500, 0x00000000);
435 nv_wr32(priv, 0x409504, 0x00000025);
436 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
437 nv_error(priv, "fuc09 req 0x25 timeout\n");
438 return -EBUSY;
439 }
440
441 nv_wr32(priv, 0x409800, 0x00000000);
442 nv_wr32(priv, 0x409500, 0x00000001);
443 nv_wr32(priv, 0x409504, 0x00000030);
444 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
445 nv_error(priv, "fuc09 req 0x30 timeout\n");
446 return -EBUSY;
447 }
448
449 nv_wr32(priv, 0x409810, 0xb00095c8);
450 nv_wr32(priv, 0x409800, 0x00000000);
451 nv_wr32(priv, 0x409500, 0x00000001);
452 nv_wr32(priv, 0x409504, 0x00000031);
453 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
454 nv_error(priv, "fuc09 req 0x31 timeout\n");
455 return -EBUSY;
456 }
457
458 nv_wr32(priv, 0x409810, 0x00080420);
459 nv_wr32(priv, 0x409800, 0x00000000);
460 nv_wr32(priv, 0x409500, 0x00000001);
461 nv_wr32(priv, 0x409504, 0x00000032);
462 if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
463 nv_error(priv, "fuc09 req 0x32 timeout\n");
464 return -EBUSY;
465 }
466
467 nv_wr32(priv, 0x409614, 0x00000070);
468 nv_wr32(priv, 0x409614, 0x00000770);
469 nv_wr32(priv, 0x40802c, 0x00000001);
470
471 if (priv->data == NULL) {
472 int ret = nve0_grctx_generate(priv);
473 if (ret) {
474 nv_error(priv, "failed to construct context\n");
475 return ret;
476 }
477 }
478
479 return 0;
480 }
481
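	/* no external firmware: fall back to the built-in ucode images */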
482 /* load HUB microcode */
483 r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
484 nv_wr32(priv, 0x4091c0, 0x01000000);
485 for (i = 0; i < sizeof(nve0_grhub_data) / 4; i++)
486 nv_wr32(priv, 0x4091c4, nve0_grhub_data[i]);
487
488 nv_wr32(priv, 0x409180, 0x01000000);
489 for (i = 0; i < sizeof(nve0_grhub_code) / 4; i++) {
490 if ((i & 0x3f) == 0)
491 nv_wr32(priv, 0x409188, i >> 6);
492 nv_wr32(priv, 0x409184, nve0_grhub_code[i]);
493 }
494
495 /* load GPC microcode */
496 nv_wr32(priv, 0x41a1c0, 0x01000000);
497 for (i = 0; i < sizeof(nve0_grgpc_data) / 4; i++)
498 nv_wr32(priv, 0x41a1c4, nve0_grgpc_data[i]);
499
500 nv_wr32(priv, 0x41a180, 0x01000000);
501 for (i = 0; i < sizeof(nve0_grgpc_code) / 4; i++) {
502 if ((i & 0x3f) == 0)
503 nv_wr32(priv, 0x41a188, i >> 6);
504 nv_wr32(priv, 0x41a184, nve0_grgpc_code[i]);
505 }
506 nv_wr32(priv, 0x000260, r000260);
507
508 /* start HUB ucode running, it'll init the GPCs */
509 nv_wr32(priv, 0x409800, nv_device(priv)->chipset);
510 nv_wr32(priv, 0x40910c, 0x00000000);
511 nv_wr32(priv, 0x409100, 0x00000002);
512 if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
513 nv_error(priv, "HUB_INIT timed out\n");
514 nvc0_graph_ctxctl_debug(priv);
515 return -EBUSY;
516 }
517
518 priv->size = nv_rd32(priv, 0x409804);
519 if (priv->data == NULL) {
520 int ret = nve0_grctx_generate(priv);
521 if (ret) {
522 nv_error(priv, "failed to construct context\n");
523 return ret;
524 }
525 }
526
527 return 0;
528}
529
530static int
531nve0_graph_init(struct nouveau_object *object)
532{
533 struct nvc0_graph_priv *priv = (void *)object;
534 int ret;
535
536 ret = nouveau_graph_init(&priv->base);
537 if (ret)
538 return ret;
539
540 nve0_graph_init_obj418880(priv);
541 nve0_graph_init_regs(priv);
542 nve0_graph_init_gpc_0(priv);
543
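	/* enable PGRAPH fifo access, then ack any pending interrupts
	 * (0x400100) and unmask them all (0x40013c)
	 */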
544 nv_wr32(priv, 0x400500, 0x00010001);
545 nv_wr32(priv, 0x400100, 0xffffffff);
546 nv_wr32(priv, 0x40013c, 0xffffffff);
547
548 nve0_graph_init_units(priv);
549 nve0_graph_init_gpc_1(priv);
550 nve0_graph_init_rop(priv);
551
552 nv_wr32(priv, 0x400108, 0xffffffff);
553 nv_wr32(priv, 0x400138, 0xffffffff);
554 nv_wr32(priv, 0x400118, 0xffffffff);
555 nv_wr32(priv, 0x400130, 0xffffffff);
556 nv_wr32(priv, 0x40011c, 0xffffffff);
557 nv_wr32(priv, 0x400134, 0xffffffff);
558 nv_wr32(priv, 0x400054, 0x34ce3464);
559
560 ret = nve0_graph_init_ctxctl(priv);
561 if (ret)
562 return ret;
563
564 return 0;
565}
566
567struct nouveau_oclass
568nve0_graph_oclass = {
569 .handle = NV_ENGINE(GR, 0xe0),
570 .ofuncs = &(struct nouveau_ofuncs) {
571 .ctor = nve0_graph_ctor,
572 .dtor = nvc0_graph_dtor,
573 .init = nve0_graph_init,
574 .fini = _nouveau_graph_fini,
575 },
576};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
new file mode 100644
index 000000000000..9c715a25cecb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
@@ -0,0 +1,269 @@
1#ifndef __NOUVEAU_GRAPH_REGS_H__
2#define __NOUVEAU_GRAPH_REGS_H__
3
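/* PGRAPH register offsets and bitfields for pre-Fermi hardware,
 * largely from reverse engineering
 */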
4#define NV04_PGRAPH_DEBUG_0 0x00400080
5#define NV04_PGRAPH_DEBUG_1 0x00400084
6#define NV04_PGRAPH_DEBUG_2 0x00400088
7#define NV04_PGRAPH_DEBUG_3 0x0040008c
8#define NV10_PGRAPH_DEBUG_4 0x00400090
9#define NV03_PGRAPH_INTR 0x00400100
10#define NV03_PGRAPH_NSTATUS 0x00400104
11# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11)
12# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12)
13# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13)
14# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14)
15# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23)
16# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24)
17# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25)
18# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26)
19#define NV03_PGRAPH_NSOURCE 0x00400108
20# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<<0)
21# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<<1)
22# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<<2)
23# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<<3)
24# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<<4)
25# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<<5)
26# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<<6)
27# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<<7)
28# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<<8)
29# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<<9)
30# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10)
31# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11)
32# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12)
33# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13)
34# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14)
35# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15)
36# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16)
37# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17)
38# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18)
39#define NV03_PGRAPH_INTR_EN 0x00400140
40#define NV40_PGRAPH_INTR_EN 0x0040013C
41# define NV_PGRAPH_INTR_NOTIFY (1<<0)
42# define NV_PGRAPH_INTR_MISSING_HW (1<<4)
43# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12)
44# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16)
45# define NV_PGRAPH_INTR_ERROR (1<<20)
46#define NV10_PGRAPH_CTX_CONTROL 0x00400144
47#define NV10_PGRAPH_CTX_USER 0x00400148
48#define NV10_PGRAPH_CTX_SWITCH(i) (0x0040014C + 0x4*(i))
49#define NV04_PGRAPH_CTX_SWITCH1 0x00400160
50#define NV10_PGRAPH_CTX_CACHE(i, j) (0x00400160 \
51 + 0x4*(i) + 0x20*(j))
52#define NV04_PGRAPH_CTX_SWITCH2 0x00400164
53#define NV04_PGRAPH_CTX_SWITCH3 0x00400168
54#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C
55#define NV04_PGRAPH_CTX_CONTROL 0x00400170
56#define NV04_PGRAPH_CTX_USER 0x00400174
57#define NV04_PGRAPH_CTX_CACHE1 0x00400180
58#define NV03_PGRAPH_CTX_CONTROL 0x00400190
59#define NV03_PGRAPH_CTX_USER 0x00400194
60#define NV04_PGRAPH_CTX_CACHE2 0x004001A0
61#define NV04_PGRAPH_CTX_CACHE3 0x004001C0
62#define NV04_PGRAPH_CTX_CACHE4 0x004001E0
63#define NV40_PGRAPH_CTXCTL_0304 0x00400304
64#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
65#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
66#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000
67#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24
68#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff
69#define NV40_PGRAPH_CTXCTL_0310 0x00400310
70#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
71#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
72#define NV40_PGRAPH_CTXCTL_030C 0x0040030c
73#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324
74#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328
75#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c
76#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000
77#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE 0x000FFFFF
78#define NV40_PGRAPH_CTXCTL_NEXT 0x00400330
79#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE 0x000fffff
80#define NV50_PGRAPH_CTXCTL_CUR 0x0040032c
81#define NV50_PGRAPH_CTXCTL_CUR_LOADED 0x80000000
82#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE 0x00ffffff
83#define NV50_PGRAPH_CTXCTL_NEXT 0x00400330
84#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE 0x00ffffff
85#define NV03_PGRAPH_ABS_X_RAM 0x00400400
86#define NV03_PGRAPH_ABS_Y_RAM 0x00400480
87#define NV03_PGRAPH_X_MISC 0x00400500
88#define NV03_PGRAPH_Y_MISC 0x00400504
89#define NV04_PGRAPH_VALID1 0x00400508
90#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C
91#define NV04_PGRAPH_MISC24_0 0x00400510
92#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514
93#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518
94#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C
95#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520
96#define NV03_PGRAPH_CLIPX_0 0x00400524
97#define NV03_PGRAPH_CLIPX_1 0x00400528
98#define NV03_PGRAPH_CLIPY_0 0x0040052C
99#define NV03_PGRAPH_CLIPY_1 0x00400530
100#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534
101#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538
102#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
103#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540
104#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544
105#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548
106#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560
107#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564
108#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568
109#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C
110#define NV04_PGRAPH_MISC24_1 0x00400570
111#define NV04_PGRAPH_MISC24_2 0x00400574
112#define NV04_PGRAPH_VALID2 0x00400578
113#define NV04_PGRAPH_PASSTHRU_0 0x0040057C
114#define NV04_PGRAPH_PASSTHRU_1 0x00400580
115#define NV04_PGRAPH_PASSTHRU_2 0x00400584
116#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588
117#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C
118#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590
119#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594
120#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598
121#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C
122#define NV04_PGRAPH_FORMAT_0 0x004005A8
123#define NV04_PGRAPH_FORMAT_1 0x004005AC
124#define NV04_PGRAPH_FILTER_0 0x004005B0
125#define NV04_PGRAPH_FILTER_1 0x004005B4
126#define NV03_PGRAPH_MONO_COLOR0 0x00400600
127#define NV04_PGRAPH_ROP3 0x00400604
128#define NV04_PGRAPH_BETA_AND 0x00400608
129#define NV04_PGRAPH_BETA_PREMULT 0x0040060C
130#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610
131#define NV04_PGRAPH_FORMATS 0x00400618
132#define NV10_PGRAPH_DEBUG_2 0x00400620
133#define NV04_PGRAPH_BOFFSET0 0x00400640
134#define NV04_PGRAPH_BOFFSET1 0x00400644
135#define NV04_PGRAPH_BOFFSET2 0x00400648
136#define NV04_PGRAPH_BOFFSET3 0x0040064C
137#define NV04_PGRAPH_BOFFSET4 0x00400650
138#define NV04_PGRAPH_BOFFSET5 0x00400654
139#define NV04_PGRAPH_BBASE0 0x00400658
140#define NV04_PGRAPH_BBASE1 0x0040065C
141#define NV04_PGRAPH_BBASE2 0x00400660
142#define NV04_PGRAPH_BBASE3 0x00400664
143#define NV04_PGRAPH_BBASE4 0x00400668
144#define NV04_PGRAPH_BBASE5 0x0040066C
145#define NV04_PGRAPH_BPITCH0 0x00400670
146#define NV04_PGRAPH_BPITCH1 0x00400674
147#define NV04_PGRAPH_BPITCH2 0x00400678
148#define NV04_PGRAPH_BPITCH3 0x0040067C
149#define NV04_PGRAPH_BPITCH4 0x00400680
150#define NV04_PGRAPH_BLIMIT0 0x00400684
151#define NV04_PGRAPH_BLIMIT1 0x00400688
152#define NV04_PGRAPH_BLIMIT2 0x0040068C
153#define NV04_PGRAPH_BLIMIT3 0x00400690
154#define NV04_PGRAPH_BLIMIT4 0x00400694
155#define NV04_PGRAPH_BLIMIT5 0x00400698
156#define NV04_PGRAPH_BSWIZZLE2 0x0040069C
157#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
158#define NV03_PGRAPH_STATUS 0x004006B0
159#define NV04_PGRAPH_STATUS 0x00400700
160# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000
161#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
162#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
163#define NV04_PGRAPH_SURFACE 0x0040070C
164#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C
165#define NV04_PGRAPH_STATE 0x00400710
166#define NV10_PGRAPH_SURFACE 0x00400710
167#define NV04_PGRAPH_NOTIFY 0x00400714
168#define NV10_PGRAPH_STATE 0x00400714
169#define NV10_PGRAPH_NOTIFY 0x00400718
170
171#define NV04_PGRAPH_FIFO 0x00400720
172
173#define NV04_PGRAPH_BPIXEL 0x00400724
174#define NV10_PGRAPH_RDI_INDEX 0x00400750
175#define NV04_PGRAPH_FFINTFC_ST2 0x00400754
176#define NV10_PGRAPH_RDI_DATA 0x00400754
177#define NV04_PGRAPH_DMA_PITCH 0x00400760
178#define NV10_PGRAPH_FFINTFC_FIFO_PTR 0x00400760
179#define NV04_PGRAPH_DVD_COLORFMT 0x00400764
180#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
181#define NV04_PGRAPH_SCALED_FORMAT 0x00400768
182#define NV10_PGRAPH_FFINTFC_ST2_DL 0x00400768
183#define NV10_PGRAPH_FFINTFC_ST2_DH 0x0040076c
184#define NV10_PGRAPH_DMA_PITCH 0x00400770
185#define NV10_PGRAPH_DVD_COLORFMT 0x00400774
186#define NV10_PGRAPH_SCALED_FORMAT 0x00400778
187#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780
188#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784
189#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788
190#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001
191#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002
192#define NV04_PGRAPH_PATT_COLOR0 0x00400800
193#define NV04_PGRAPH_PATT_COLOR1 0x00400804
194#define NV04_PGRAPH_PATTERN 0x00400808
195#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810
196#define NV04_PGRAPH_CHROMA 0x00400814
197#define NV04_PGRAPH_CONTROL0 0x00400818
198#define NV04_PGRAPH_CONTROL1 0x0040081C
199#define NV04_PGRAPH_CONTROL2 0x00400820
200#define NV04_PGRAPH_BLEND 0x00400824
201#define NV04_PGRAPH_STORED_FMT 0x00400830
202#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
203#define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16))
204#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
205#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
206#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
207#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
208#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
209#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
210#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
211#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
212#define NV04_PGRAPH_U_RAM 0x00400D00
213#define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16))
214#define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16))
215#define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16))
216#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
217#define NV04_PGRAPH_V_RAM 0x00400D40
218#define NV04_PGRAPH_W_RAM 0x00400D80
219#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
220#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
221#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
222#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C
223#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50
224#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54
225#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58
226#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C
227#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60
228#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64
229#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68
230#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C
231#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00
232#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20
233#define NV10_PGRAPH_XFMODE0 0x00400F40
234#define NV10_PGRAPH_XFMODE1 0x00400F44
235#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48
236#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C
237#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50
238#define NV10_PGRAPH_PIPE_DATA 0x00400F54
239#define NV04_PGRAPH_DMA_START_0 0x00401000
240#define NV04_PGRAPH_DMA_START_1 0x00401004
241#define NV04_PGRAPH_DMA_LENGTH 0x00401008
242#define NV04_PGRAPH_DMA_MISC 0x0040100C
243#define NV04_PGRAPH_DMA_DATA_0 0x00401020
244#define NV04_PGRAPH_DMA_DATA_1 0x00401024
245#define NV04_PGRAPH_DMA_RM 0x00401030
246#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040
247#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044
248#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048
249#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C
250#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050
251#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054
252#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058
253#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C
254#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060
255#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080
256#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084
257#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088
258#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C
259#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090
260#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094
261#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
262#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
263#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
264#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
265#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
266#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
267#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
268
269#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
new file mode 100644
index 000000000000..1f394a2629e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -0,0 +1,308 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28#include <core/handle.h>
29
30#include <subdev/fb.h>
31#include <subdev/timer.h>
32#include <subdev/instmem.h>
33
34#include <engine/fifo.h>
35#include <engine/mpeg.h>
36#include <engine/graph/nv40.h>
37
38struct nv31_mpeg_priv {
39 struct nouveau_mpeg base;
40 atomic_t refcount;
41};
42
43struct nv31_mpeg_chan {
44 struct nouveau_object base;
45};
46
47/*******************************************************************************
48 * MPEG object classes
49 ******************************************************************************/
50
51static int
52nv31_mpeg_object_ctor(struct nouveau_object *parent,
53 struct nouveau_object *engine,
54 struct nouveau_oclass *oclass, void *data, u32 size,
55 struct nouveau_object **pobject)
56{
57 struct nouveau_gpuobj *obj;
58 int ret;
59
60 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
61 20, 16, 0, &obj);
62 *pobject = nv_object(obj);
63 if (ret)
64 return ret;
65
66 nv_wo32(obj, 0x00, nv_mclass(obj));
67 nv_wo32(obj, 0x04, 0x00000000);
68 nv_wo32(obj, 0x08, 0x00000000);
69 nv_wo32(obj, 0x0c, 0x00000000);
70 return 0;
71}
72
73static int
74nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
75{
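	/* methods 0x0190/0x01a0/0x01b0 bind DMA objects (CMD/DATA/IMAGE):
	 * look the object up in instance memory, validate it, and program
	 * the matching base/limit registers
	 */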
76 struct nouveau_instmem *imem = nouveau_instmem(object);
77 struct nv31_mpeg_priv *priv = (void *)object->engine;
78 u32 inst = *(u32 *)arg << 4;
79 u32 dma0 = nv_ro32(imem, inst + 0);
80 u32 dma1 = nv_ro32(imem, inst + 4);
81 u32 dma2 = nv_ro32(imem, inst + 8);
82 u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
83 u32 size = dma1 + 1;
84
85 /* only allow linear DMA objects */
86 if (!(dma0 & 0x00002000))
87 return -EINVAL;
88
89 if (mthd == 0x0190) {
90 /* DMA_CMD */
91 nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
92 nv_wr32(priv, 0x00b334, base);
93 nv_wr32(priv, 0x00b324, size);
94 } else
95 if (mthd == 0x01a0) {
96 /* DMA_DATA */
97 nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
98 nv_wr32(priv, 0x00b360, base);
99 nv_wr32(priv, 0x00b364, size);
100 } else {
101 /* DMA_IMAGE, VRAM only */
102 if (dma0 & 0x000c0000)
103 return -EINVAL;
104
105 nv_wr32(priv, 0x00b370, base);
106 nv_wr32(priv, 0x00b374, size);
107 }
108
109 return 0;
110}
111
112static struct nouveau_ofuncs
113nv31_mpeg_ofuncs = {
114 .ctor = nv31_mpeg_object_ctor,
115 .dtor = _nouveau_gpuobj_dtor,
116 .init = _nouveau_gpuobj_init,
117 .fini = _nouveau_gpuobj_fini,
118 .rd32 = _nouveau_gpuobj_rd32,
119 .wr32 = _nouveau_gpuobj_wr32,
120};
121
122static struct nouveau_omthds
123nv31_mpeg_omthds[] = {
124 { 0x0190, nv31_mpeg_mthd_dma },
125 { 0x01a0, nv31_mpeg_mthd_dma },
126 { 0x01b0, nv31_mpeg_mthd_dma },
127 {}
128};
129
130struct nouveau_oclass
131nv31_mpeg_sclass[] = {
132 { 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
133 {}
134};
135
136/*******************************************************************************
137 * PMPEG context
138 ******************************************************************************/
139
140static int
141nv31_mpeg_context_ctor(struct nouveau_object *parent,
142 struct nouveau_object *engine,
143 struct nouveau_oclass *oclass, void *data, u32 size,
144 struct nouveau_object **pobject)
145{
146 struct nv31_mpeg_priv *priv = (void *)engine;
147 struct nv31_mpeg_chan *chan;
148 int ret;
149
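	/* nv31 PMPEG has a single hardware context; permit only one
	 * channel to own the engine at any time
	 */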
150 if (!atomic_add_unless(&priv->refcount, 1, 1))
151 return -EBUSY;
152
153 ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
154 *pobject = nv_object(chan);
155 if (ret)
156 return ret;
157
158 return 0;
159}
160
161static void
162nv31_mpeg_context_dtor(struct nouveau_object *object)
163{
164 struct nv31_mpeg_priv *priv = (void *)object->engine;
165 struct nv31_mpeg_chan *chan = (void *)object;
166 atomic_dec(&priv->refcount);
167 nouveau_object_destroy(&chan->base);
168}
169
170static struct nouveau_oclass
171nv31_mpeg_cclass = {
172 .handle = NV_ENGCTX(MPEG, 0x31),
173 .ofuncs = &(struct nouveau_ofuncs) {
174 .ctor = nv31_mpeg_context_ctor,
175 .dtor = nv31_mpeg_context_dtor,
176 .init = nouveau_object_init,
177 .fini = nouveau_object_fini,
178 },
179};
180
181/*******************************************************************************
182 * PMPEG engine/subdev functions
183 ******************************************************************************/
184
185void
186nv31_mpeg_tile_prog(struct nouveau_engine *engine, int i)
187{
188 struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
189 struct nv31_mpeg_priv *priv = (void *)engine;
190
191 nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch);
192 nv_wr32(priv, 0x00b004 + (i * 0x10), tile->limit);
193 nv_wr32(priv, 0x00b000 + (i * 0x10), tile->addr);
194}
195
196void
197nv31_mpeg_intr(struct nouveau_subdev *subdev)
198{
199 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
200 struct nouveau_engine *engine = nv_engine(subdev);
201 struct nouveau_object *engctx;
202 struct nouveau_handle *handle;
203 struct nv31_mpeg_priv *priv = (void *)subdev;
204 u32 inst = nv_rd32(priv, 0x00b318) & 0x000fffff;
205 u32 stat = nv_rd32(priv, 0x00b100);
206 u32 type = nv_rd32(priv, 0x00b230);
207 u32 mthd = nv_rd32(priv, 0x00b234);
208 u32 data = nv_rd32(priv, 0x00b238);
209 u32 show = stat;
210 int chid;
211
212 engctx = nouveau_engctx_get(engine, inst);
213 chid = pfifo->chid(pfifo, engctx);
214
215 if (stat & 0x01000000) {
216 /* happens on initial binding of the object */
217 if (type == 0x00000020 && mthd == 0x0000) {
218 nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
219 show &= ~0x01000000;
220 }
221
222 if (type == 0x00000010) {
223 handle = nouveau_handle_get_class(engctx, 0x3174);
224 if (handle && !nv_call(handle->object, mthd, data))
225 show &= ~0x01000000;
226 nouveau_handle_put(handle);
227 }
228 }
229
230 nv_wr32(priv, 0x00b100, stat);
231 nv_wr32(priv, 0x00b230, 0x00000001);
232
233 if (show) {
234 nv_error(priv, "ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
235 chid, inst << 4, stat, type, mthd, data);
236 }
237
238 nouveau_engctx_put(engctx);
239}
240
241static int
242nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
243 struct nouveau_oclass *oclass, void *data, u32 size,
244 struct nouveau_object **pobject)
245{
246 struct nv31_mpeg_priv *priv;
247 int ret;
248
249 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
250 *pobject = nv_object(priv);
251 if (ret)
252 return ret;
253
254 nv_subdev(priv)->unit = 0x00000002;
255 nv_subdev(priv)->intr = nv31_mpeg_intr;
256 nv_engine(priv)->cclass = &nv31_mpeg_cclass;
257 nv_engine(priv)->sclass = nv31_mpeg_sclass;
258 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
259 return 0;
260}
261
262int
263nv31_mpeg_init(struct nouveau_object *object)
264{
265 struct nouveau_engine *engine = nv_engine(object->engine);
266 struct nv31_mpeg_priv *priv = (void *)engine;
267 struct nouveau_fb *pfb = nouveau_fb(object);
268 int ret, i;
269
270 ret = nouveau_mpeg_init(&priv->base);
271 if (ret)
272 return ret;
273
274 /* VPE init */
275 nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
276 nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
277
278 for (i = 0; i < pfb->tile.regions; i++)
279 engine->tile_prog(engine, i);
280
281 /* PMPEG init */
282 nv_wr32(priv, 0x00b32c, 0x00000000);
283 nv_wr32(priv, 0x00b314, 0x00000100);
284 nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031);
285 nv_wr32(priv, 0x00b300, 0x02001ec1);
286 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
287
288 nv_wr32(priv, 0x00b100, 0xffffffff);
289 nv_wr32(priv, 0x00b140, 0xffffffff);
290
291 if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
292 nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
293 return -EBUSY;
294 }
295
296 return 0;
297}
298
299struct nouveau_oclass
300nv31_mpeg_oclass = {
301 .handle = NV_ENGINE(MPEG, 0x31),
302 .ofuncs = &(struct nouveau_ofuncs) {
303 .ctor = nv31_mpeg_ctor,
304 .dtor = _nouveau_mpeg_dtor,
305 .init = nv31_mpeg_init,
306 .fini = _nouveau_mpeg_fini,
307 },
308};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
new file mode 100644
index 000000000000..12418574efea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -0,0 +1,144 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <subdev/fb.h>
30#include <subdev/timer.h>
31#include <subdev/instmem.h>
32
33#include <engine/mpeg.h>
34#include <engine/graph/nv40.h>
35
36struct nv40_mpeg_priv {
37 struct nouveau_mpeg base;
38};
39
40struct nv40_mpeg_chan {
41 struct nouveau_mpeg base;
42};
43
44/*******************************************************************************
45 * PMPEG context
46 ******************************************************************************/
47
48static int
49nv40_mpeg_context_ctor(struct nouveau_object *parent,
50 struct nouveau_object *engine,
51 struct nouveau_oclass *oclass, void *data, u32 size,
52 struct nouveau_object **pobject)
53{
54 struct nv40_mpeg_chan *chan;
55 int ret;
56
57 ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
58 264 * 4, 16,
59 NVOBJ_FLAG_ZERO_ALLOC, &chan);
60 *pobject = nv_object(chan);
61 if (ret)
62 return ret;
63
64 return 0;
65}
66
67static int
68nv40_mpeg_context_fini(struct nouveau_object *object, bool suspend)
69{
70
71 struct nv40_mpeg_priv *priv = (void *)object->engine;
72 struct nv40_mpeg_chan *chan = (void *)object;
73 u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
74
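	/* stop PMPEG fetching, evict this channel's context if it is
	 * currently active, then re-enable fetching
	 */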
75 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
76 if (nv_rd32(priv, 0x00b318) == inst)
77 nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
78 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
79 return 0;
80}
81
82static struct nouveau_oclass
83nv40_mpeg_cclass = {
84 .handle = NV_ENGCTX(MPEG, 0x40),
85 .ofuncs = &(struct nouveau_ofuncs) {
86 .ctor = nv40_mpeg_context_ctor,
87 .dtor = _nouveau_mpeg_context_dtor,
88 .init = _nouveau_mpeg_context_init,
89 .fini = nv40_mpeg_context_fini,
90 .rd32 = _nouveau_mpeg_context_rd32,
91 .wr32 = _nouveau_mpeg_context_wr32,
92 },
93};
94
95/*******************************************************************************
96 * PMPEG engine/subdev functions
97 ******************************************************************************/
98
99static void
100nv40_mpeg_intr(struct nouveau_subdev *subdev)
101{
102 struct nv40_mpeg_priv *priv = (void *)subdev;
103 u32 stat;
104
105 if ((stat = nv_rd32(priv, 0x00b100)))
106 nv31_mpeg_intr(subdev);
107
108 if ((stat = nv_rd32(priv, 0x00b800))) {
109 nv_error(priv, "PMSRCH 0x%08x\n", stat);
110 nv_wr32(priv, 0x00b800, stat);
111 }
112}
113
114static int
115nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
116 struct nouveau_oclass *oclass, void *data, u32 size,
117 struct nouveau_object **pobject)
118{
119 struct nv40_mpeg_priv *priv;
120 int ret;
121
122 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
123 *pobject = nv_object(priv);
124 if (ret)
125 return ret;
126
127 nv_subdev(priv)->unit = 0x00000002;
128 nv_subdev(priv)->intr = nv40_mpeg_intr;
129 nv_engine(priv)->cclass = &nv40_mpeg_cclass;
130 nv_engine(priv)->sclass = nv31_mpeg_sclass;
131 nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
132 return 0;
133}
134
135struct nouveau_oclass
136nv40_mpeg_oclass = {
137 .handle = NV_ENGINE(MPEG, 0x40),
138 .ofuncs = &(struct nouveau_ofuncs) {
139 .ctor = nv40_mpeg_ctor,
140 .dtor = _nouveau_mpeg_dtor,
141 .init = nv31_mpeg_init,
142 .fini = _nouveau_mpeg_fini,
143 },
144};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
new file mode 100644
index 000000000000..8678a9996d57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -0,0 +1,240 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <subdev/vm.h>
30#include <subdev/bar.h>
31#include <subdev/timer.h>
32
33#include <engine/mpeg.h>
34
35struct nv50_mpeg_priv {
36 struct nouveau_mpeg base;
37};
38
39struct nv50_mpeg_chan {
40 struct nouveau_mpeg_chan base;
41};
42
43/*******************************************************************************
44 * MPEG object classes
45 ******************************************************************************/
46
47static int
48nv50_mpeg_object_ctor(struct nouveau_object *parent,
49 struct nouveau_object *engine,
50 struct nouveau_oclass *oclass, void *data, u32 size,
51 struct nouveau_object **pobject)
52{
53 struct nouveau_gpuobj *obj;
54 int ret;
55
56 ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
57 16, 16, 0, &obj);
58 *pobject = nv_object(obj);
59 if (ret)
60 return ret;
61
62 nv_wo32(obj, 0x00, nv_mclass(obj));
63 nv_wo32(obj, 0x04, 0x00000000);
64 nv_wo32(obj, 0x08, 0x00000000);
65 nv_wo32(obj, 0x0c, 0x00000000);
66 return 0;
67}
68
69struct nouveau_ofuncs
70nv50_mpeg_ofuncs = {
71 .ctor = nv50_mpeg_object_ctor,
72 .dtor = _nouveau_gpuobj_dtor,
73 .init = _nouveau_gpuobj_init,
74 .fini = _nouveau_gpuobj_fini,
75 .rd32 = _nouveau_gpuobj_rd32,
76 .wr32 = _nouveau_gpuobj_wr32,
77};
78
79static struct nouveau_oclass
80nv50_mpeg_sclass[] = {
81 { 0x3174, &nv50_mpeg_ofuncs },
82 {}
83};
84
85/*******************************************************************************
86 * PMPEG context
87 ******************************************************************************/
88
89int
90nv50_mpeg_context_ctor(struct nouveau_object *parent,
91 struct nouveau_object *engine,
92 struct nouveau_oclass *oclass, void *data, u32 size,
93 struct nouveau_object **pobject)
94{
95 struct nouveau_bar *bar = nouveau_bar(parent);
96 struct nv50_mpeg_chan *chan;
97 int ret;
98
99 ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4,
100 0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
101 *pobject = nv_object(chan);
102 if (ret)
103 return ret;
104
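	/* seed the context image; the 0x0070/0x007c values are magic,
	 * presumably mirroring hardware defaults
	 */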
105 nv_wo32(chan, 0x0070, 0x00801ec1);
106 nv_wo32(chan, 0x007c, 0x0000037c);
107 bar->flush(bar);
108 return 0;
109}
110
111static struct nouveau_oclass
112nv50_mpeg_cclass = {
113 .handle = NV_ENGCTX(MPEG, 0x50),
114 .ofuncs = &(struct nouveau_ofuncs) {
115 .ctor = nv50_mpeg_context_ctor,
116 .dtor = _nouveau_mpeg_context_dtor,
117 .init = _nouveau_mpeg_context_init,
118 .fini = _nouveau_mpeg_context_fini,
119 .rd32 = _nouveau_mpeg_context_rd32,
120 .wr32 = _nouveau_mpeg_context_wr32,
121 },
122};
123
124/*******************************************************************************
125 * PMPEG engine/subdev functions
126 ******************************************************************************/
127
128int
129nv50_mpeg_tlb_flush(struct nouveau_engine *engine)
130{
131 nv50_vm_flush_engine(&engine->base, 0x08);
132 return 0;
133}
134
135void
136nv50_mpeg_intr(struct nouveau_subdev *subdev)
137{
138 struct nv50_mpeg_priv *priv = (void *)subdev;
139 u32 stat = nv_rd32(priv, 0x00b100);
140 u32 type = nv_rd32(priv, 0x00b230);
141 u32 mthd = nv_rd32(priv, 0x00b234);
142 u32 data = nv_rd32(priv, 0x00b238);
143 u32 show = stat;
144
145 if (stat & 0x01000000) {
146 /* happens on initial binding of the object */
147 if (type == 0x00000020 && mthd == 0x0000) {
148 nv_wr32(priv, 0x00b308, 0x00000100);
149 show &= ~0x01000000;
150 }
151 }
152
153 if (show) {
154 nv_info(priv, "0x%08x 0x%08x 0x%08x 0x%08x\n",
155 stat, type, mthd, data);
156 }
157
158 nv_wr32(priv, 0x00b100, stat);
159 nv_wr32(priv, 0x00b230, 0x00000001);
160 nv50_fb_trap(nouveau_fb(priv), 1);
161}
162
163static void
164nv50_vpe_intr(struct nouveau_subdev *subdev)
165{
166 struct nv50_mpeg_priv *priv = (void *)subdev;
167
168 if (nv_rd32(priv, 0x00b100))
169 nv50_mpeg_intr(subdev);
170
171 if (nv_rd32(priv, 0x00b800)) {
172 u32 stat = nv_rd32(priv, 0x00b800);
173 nv_info(priv, "PMSRCH: 0x%08x\n", stat);
174		nv_wr32(priv, 0x00b800, stat);
175 }
176}
177
178static int
179nv50_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
180 struct nouveau_oclass *oclass, void *data, u32 size,
181 struct nouveau_object **pobject)
182{
183 struct nv50_mpeg_priv *priv;
184 int ret;
185
186 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
187 *pobject = nv_object(priv);
188 if (ret)
189 return ret;
190
191 nv_subdev(priv)->unit = 0x00400002;
192 nv_subdev(priv)->intr = nv50_vpe_intr;
193 nv_engine(priv)->cclass = &nv50_mpeg_cclass;
194 nv_engine(priv)->sclass = nv50_mpeg_sclass;
195 nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush;
196 return 0;
197}
198
199int
200nv50_mpeg_init(struct nouveau_object *object)
201{
202 struct nv50_mpeg_priv *priv = (void *)object;
203 int ret;
204
205 ret = nouveau_mpeg_init(&priv->base);
206 if (ret)
207 return ret;
208
209 nv_wr32(priv, 0x00b32c, 0x00000000);
210 nv_wr32(priv, 0x00b314, 0x00000100);
211 nv_wr32(priv, 0x00b0e0, 0x0000001a);
212
213 nv_wr32(priv, 0x00b220, 0x00000044);
214 nv_wr32(priv, 0x00b300, 0x00801ec1);
215 nv_wr32(priv, 0x00b390, 0x00000000);
216 nv_wr32(priv, 0x00b394, 0x00000000);
217 nv_wr32(priv, 0x00b398, 0x00000000);
218 nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
219
220 nv_wr32(priv, 0x00b100, 0xffffffff);
221 nv_wr32(priv, 0x00b140, 0xffffffff);
222
223 if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
224 nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
225 return -EBUSY;
226 }
227
228 return 0;
229}
230
231struct nouveau_oclass
232nv50_mpeg_oclass = {
233 .handle = NV_ENGINE(MPEG, 0x50),
234 .ofuncs = &(struct nouveau_ofuncs) {
235 .ctor = nv50_mpeg_ctor,
236 .dtor = _nouveau_mpeg_dtor,
237 .init = nv50_mpeg_init,
238 .fini = _nouveau_mpeg_fini,
239 },
240};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
new file mode 100644
index 000000000000..8f805b44d59e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
@@ -0,0 +1,104 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <subdev/vm.h>
30#include <subdev/bar.h>
31#include <subdev/timer.h>
32
33#include <engine/mpeg.h>
34
35struct nv84_mpeg_priv {
36 struct nouveau_mpeg base;
37};
38
39struct nv84_mpeg_chan {
40 struct nouveau_mpeg_chan base;
41};
42
43/*******************************************************************************
44 * MPEG object classes
45 ******************************************************************************/
46
47static struct nouveau_oclass
48nv84_mpeg_sclass[] = {
49 { 0x8274, &nv50_mpeg_ofuncs },
50 {}
51};
52
53/*******************************************************************************
54 * PMPEG context
55 ******************************************************************************/
56
57static struct nouveau_oclass
58nv84_mpeg_cclass = {
59 .handle = NV_ENGCTX(MPEG, 0x84),
60 .ofuncs = &(struct nouveau_ofuncs) {
61 .ctor = nv50_mpeg_context_ctor,
62 .dtor = _nouveau_mpeg_context_dtor,
63 .init = _nouveau_mpeg_context_init,
64 .fini = _nouveau_mpeg_context_fini,
65 .rd32 = _nouveau_mpeg_context_rd32,
66 .wr32 = _nouveau_mpeg_context_wr32,
67 },
68};
69
70/*******************************************************************************
71 * PMPEG engine/subdev functions
72 ******************************************************************************/
73
74static int
75nv84_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
76 struct nouveau_oclass *oclass, void *data, u32 size,
77 struct nouveau_object **pobject)
78{
79 struct nv84_mpeg_priv *priv;
80 int ret;
81
82 ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
83 *pobject = nv_object(priv);
84 if (ret)
85 return ret;
86
87 nv_subdev(priv)->unit = 0x00000002;
88 nv_subdev(priv)->intr = nv50_mpeg_intr;
89 nv_engine(priv)->cclass = &nv84_mpeg_cclass;
90 nv_engine(priv)->sclass = nv84_mpeg_sclass;
91 nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush;
92 return 0;
93}
94
95struct nouveau_oclass
96nv84_mpeg_oclass = {
97 .handle = NV_ENGINE(MPEG, 0x84),
98 .ofuncs = &(struct nouveau_ofuncs) {
99 .ctor = nv84_mpeg_ctor,
100 .dtor = _nouveau_mpeg_dtor,
101 .init = nv50_mpeg_init,
102 .fini = _nouveau_mpeg_fini,
103 },
104};
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
new file mode 100644
index 000000000000..50e7e0da1981
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -0,0 +1,175 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/ppp.h>
30
31struct nv98_ppp_priv {
32 struct nouveau_ppp base;
33};
34
35struct nv98_ppp_chan {
36 struct nouveau_ppp_chan base;
37};
38
39/*******************************************************************************
40 * PPP object classes
41 ******************************************************************************/
42
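/* no object classes are exposed yet; the PPP falcon is effectively
 * stubbed out at this point
 */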
43static struct nouveau_oclass
44nv98_ppp_sclass[] = {
45 {},
46};
47
48/*******************************************************************************
49 * PPPP context
50 ******************************************************************************/
51
52static int
53nv98_ppp_context_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct nv98_ppp_chan *priv;
59 int ret;
60
61 ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
62 0, 0, 0, &priv);
63 *pobject = nv_object(priv);
64 if (ret)
65 return ret;
66
67 return 0;
68}
69
70static void
71nv98_ppp_context_dtor(struct nouveau_object *object)
72{
73 struct nv98_ppp_chan *priv = (void *)object;
74 nouveau_ppp_context_destroy(&priv->base);
75}
76
77static int
78nv98_ppp_context_init(struct nouveau_object *object)
79{
80 struct nv98_ppp_chan *priv = (void *)object;
81 int ret;
82
83 ret = nouveau_ppp_context_init(&priv->base);
84 if (ret)
85 return ret;
86
87 return 0;
88}
89
90static int
91nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
92{
93 struct nv98_ppp_chan *priv = (void *)object;
94 return nouveau_ppp_context_fini(&priv->base, suspend);
95}
96
97static struct nouveau_oclass
98nv98_ppp_cclass = {
99 .handle = NV_ENGCTX(PPP, 0x98),
100 .ofuncs = &(struct nouveau_ofuncs) {
101 .ctor = nv98_ppp_context_ctor,
102 .dtor = nv98_ppp_context_dtor,
103 .init = nv98_ppp_context_init,
104 .fini = nv98_ppp_context_fini,
105 .rd32 = _nouveau_ppp_context_rd32,
106 .wr32 = _nouveau_ppp_context_wr32,
107 },
108};
109
110/*******************************************************************************
111 * PPPP engine/subdev functions
112 ******************************************************************************/
113
114static void
115nv98_ppp_intr(struct nouveau_subdev *subdev)
116{
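	/* TODO: interrupt handling not implemented yet */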
117}
118
119static int
120nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
121 struct nouveau_oclass *oclass, void *data, u32 size,
122 struct nouveau_object **pobject)
123{
124 struct nv98_ppp_priv *priv;
125 int ret;
126
127 ret = nouveau_ppp_create(parent, engine, oclass, &priv);
128 *pobject = nv_object(priv);
129 if (ret)
130 return ret;
131
132 nv_subdev(priv)->unit = 0x00400002;
133 nv_subdev(priv)->intr = nv98_ppp_intr;
134 nv_engine(priv)->cclass = &nv98_ppp_cclass;
135 nv_engine(priv)->sclass = nv98_ppp_sclass;
136 return 0;
137}
138
139static void
140nv98_ppp_dtor(struct nouveau_object *object)
141{
142 struct nv98_ppp_priv *priv = (void *)object;
143 nouveau_ppp_destroy(&priv->base);
144}
145
146static int
147nv98_ppp_init(struct nouveau_object *object)
148{
149 struct nv98_ppp_priv *priv = (void *)object;
150 int ret;
151
152 ret = nouveau_ppp_init(&priv->base);
153 if (ret)
154 return ret;
155
156 return 0;
157}
158
159static int
160nv98_ppp_fini(struct nouveau_object *object, bool suspend)
161{
162 struct nv98_ppp_priv *priv = (void *)object;
163 return nouveau_ppp_fini(&priv->base, suspend);
164}
165
166struct nouveau_oclass
167nv98_ppp_oclass = {
168 .handle = NV_ENGINE(PPP, 0x98),
169 .ofuncs = &(struct nouveau_ofuncs) {
170 .ctor = nv98_ppp_ctor,
171 .dtor = nv98_ppp_dtor,
172 .init = nv98_ppp_init,
173 .fini = nv98_ppp_fini,
174 },
175};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
new file mode 100644
index 000000000000..3ca4c3aa90b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -0,0 +1,147 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/software.h>
30#include <engine/fifo.h>
31
32struct nv04_software_priv {
33 struct nouveau_software base;
34};
35
36struct nv04_software_chan {
37 struct nouveau_software_chan base;
38};
39
40/*******************************************************************************
41 * software object classes
42 ******************************************************************************/
43
44static int
45nv04_software_set_ref(struct nouveau_object *object, u32 mthd,
46 void *data, u32 size)
47{
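	/* SW class method 0x0150: seed the channel's reference counter,
	 * presumably used for legacy reference-based synchronisation
	 */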
48 struct nouveau_object *channel = (void *)nv_engctx(object->parent);
49 struct nouveau_fifo_chan *fifo = (void *)channel->parent;
50	atomic_set(&fifo->refcnt, *(u32 *)data);
51 return 0;
52}
53
54static int
55nv04_software_flip(struct nouveau_object *object, u32 mthd,
56 void *args, u32 size)
57{
58 struct nv04_software_chan *chan = (void *)nv_engctx(object->parent);
59 if (chan->base.flip)
60 return chan->base.flip(chan->base.flip_data);
61 return -EINVAL;
62}
63
64static struct nouveau_omthds
65nv04_software_omthds[] = {
66 { 0x0150, nv04_software_set_ref },
67 { 0x0500, nv04_software_flip },
68 {}
69};
70
71static struct nouveau_oclass
72nv04_software_sclass[] = {
73 { 0x006e, &nouveau_object_ofuncs, nv04_software_omthds },
74 {}
75};
76
77/*******************************************************************************
78 * software context
79 ******************************************************************************/
80
81static int
82nv04_software_context_ctor(struct nouveau_object *parent,
83 struct nouveau_object *engine,
84 struct nouveau_oclass *oclass, void *data, u32 size,
85 struct nouveau_object **pobject)
86{
87 struct nv04_software_chan *chan;
88 int ret;
89
90 ret = nouveau_software_context_create(parent, engine, oclass, &chan);
91 *pobject = nv_object(chan);
92 if (ret)
93 return ret;
94
95 return 0;
96}
97
98static struct nouveau_oclass
99nv04_software_cclass = {
100 .handle = NV_ENGCTX(SW, 0x04),
101 .ofuncs = &(struct nouveau_ofuncs) {
102 .ctor = nv04_software_context_ctor,
103 .dtor = _nouveau_software_context_dtor,
104 .init = _nouveau_software_context_init,
105 .fini = _nouveau_software_context_fini,
106 },
107};
108
109/*******************************************************************************
110 * software engine/subdev functions
111 ******************************************************************************/
112
113void
114nv04_software_intr(struct nouveau_subdev *subdev)
115{
116 nv_mask(subdev, 0x000100, 0x80000000, 0x00000000);
117}
118
119static int
120nv04_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
121 struct nouveau_oclass *oclass, void *data, u32 size,
122 struct nouveau_object **pobject)
123{
124 struct nv04_software_priv *priv;
125 int ret;
126
127 ret = nouveau_software_create(parent, engine, oclass, &priv);
128 *pobject = nv_object(priv);
129 if (ret)
130 return ret;
131
132 nv_engine(priv)->cclass = &nv04_software_cclass;
133 nv_engine(priv)->sclass = nv04_software_sclass;
134 nv_subdev(priv)->intr = nv04_software_intr;
135 return 0;
136}
137
138struct nouveau_oclass
139nv04_software_oclass = {
140 .handle = NV_ENGINE(SW, 0x04),
141 .ofuncs = &(struct nouveau_ofuncs) {
142 .ctor = nv04_software_ctor,
143 .dtor = _nouveau_software_dtor,
144 .init = _nouveau_software_init,
145 .fini = _nouveau_software_fini,
146 },
147};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
new file mode 100644
index 000000000000..6e699afbfdb7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -0,0 +1,129 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/software.h>
30
31struct nv10_software_priv {
32 struct nouveau_software base;
33};
34
35struct nv10_software_chan {
36 struct nouveau_software_chan base;
37};
38
39/*******************************************************************************
40 * software object classes
41 ******************************************************************************/
42
43static int
44nv10_software_flip(struct nouveau_object *object, u32 mthd,
45 void *args, u32 size)
46{
47 struct nv10_software_chan *chan = (void *)nv_engctx(object->parent);
48 if (chan->base.flip)
49 return chan->base.flip(chan->base.flip_data);
50 return -EINVAL;
51}
52
53static struct nouveau_omthds
54nv10_software_omthds[] = {
55 { 0x0500, nv10_software_flip },
56 {}
57};
58
59static struct nouveau_oclass
60nv10_software_sclass[] = {
61 { 0x016e, &nouveau_object_ofuncs, nv10_software_omthds },
62 {}
63};
64
65/*******************************************************************************
66 * software context
67 ******************************************************************************/
68
69static int
70nv10_software_context_ctor(struct nouveau_object *parent,
71 struct nouveau_object *engine,
72 struct nouveau_oclass *oclass, void *data, u32 size,
73 struct nouveau_object **pobject)
74{
75 struct nv10_software_chan *chan;
76 int ret;
77
78 ret = nouveau_software_context_create(parent, engine, oclass, &chan);
79 *pobject = nv_object(chan);
80 if (ret)
81 return ret;
82
83 return 0;
84}
85
86static struct nouveau_oclass
87nv10_software_cclass = {
88	.handle = NV_ENGCTX(SW, 0x10),
89 .ofuncs = &(struct nouveau_ofuncs) {
90 .ctor = nv10_software_context_ctor,
91 .dtor = _nouveau_software_context_dtor,
92 .init = _nouveau_software_context_init,
93 .fini = _nouveau_software_context_fini,
94 },
95};
96
97/*******************************************************************************
98 * software engine/subdev functions
99 ******************************************************************************/
100
101static int
102nv10_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
103 struct nouveau_oclass *oclass, void *data, u32 size,
104 struct nouveau_object **pobject)
105{
106 struct nv10_software_priv *priv;
107 int ret;
108
109 ret = nouveau_software_create(parent, engine, oclass, &priv);
110 *pobject = nv_object(priv);
111 if (ret)
112 return ret;
113
114 nv_engine(priv)->cclass = &nv10_software_cclass;
115 nv_engine(priv)->sclass = nv10_software_sclass;
116 nv_subdev(priv)->intr = nv04_software_intr;
117 return 0;
118}
119
120struct nouveau_oclass
121nv10_software_oclass = {
122 .handle = NV_ENGINE(SW, 0x10),
123 .ofuncs = &(struct nouveau_ofuncs) {
124 .ctor = nv10_software_ctor,
125 .dtor = _nouveau_software_dtor,
126 .init = _nouveau_software_init,
127 .fini = _nouveau_software_fini,
128 },
129};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
new file mode 100644
index 000000000000..a2edcd38544a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -0,0 +1,199 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28#include <core/namedb.h>
29#include <core/handle.h>
30#include <core/gpuobj.h>
31
32#include <engine/software.h>
33#include <engine/disp.h>
34
35struct nv50_software_priv {
36 struct nouveau_software base;
37};
38
39struct nv50_software_chan {
40 struct nouveau_software_chan base;
41};
42
43/*******************************************************************************
44 * software object classes
45 ******************************************************************************/
46
47static int
48nv50_software_mthd_dma_vblsem(struct nouveau_object *object, u32 mthd,
49 void *args, u32 size)
50{
51 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
52 struct nouveau_fifo_chan *fifo = (void *)nv_object(chan)->parent;
53 struct nouveau_handle *handle;
54 int ret = -EINVAL;
55
56 handle = nouveau_namedb_get(nv_namedb(fifo), *(u32 *)args);
57 if (!handle)
58 return -ENOENT;
59
60 if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
61 struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object);
62 chan->base.vblank.ctxdma = gpuobj->node->offset >> 4;
63 ret = 0;
64 }
65 nouveau_namedb_put(handle);
66 return ret;
67}
68
69static int
70nv50_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
71 void *args, u32 size)
72{
73 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
74 chan->base.vblank.offset = *(u32 *)args;
75 return 0;
76}
77
78static int
79nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
80 void *args, u32 size)
81{
82 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
83 chan->base.vblank.value = *(u32 *)args;
84 return 0;
85}
86
87static int
88nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
89 void *args, u32 size)
90{
91 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
92 struct nouveau_disp *disp = nouveau_disp(object);
93 unsigned long flags;
94 u32 crtc = *(u32 *)args;
95
96 if (crtc > 1)
97 return -EINVAL;
98
99 disp->vblank.get(disp->vblank.data, crtc);
100
101 spin_lock_irqsave(&disp->vblank.lock, flags);
102 list_add(&chan->base.vblank.head, &disp->vblank.list);
103 chan->base.vblank.crtc = crtc;
104 spin_unlock_irqrestore(&disp->vblank.lock, flags);
105 return 0;
106}
107
108static int
109nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
110 void *args, u32 size)
111{
112 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
113 if (chan->base.flip)
114 return chan->base.flip(chan->base.flip_data);
115 return -EINVAL;
116}
117
118static struct nouveau_omthds
119nv50_software_omthds[] = {
120 { 0x018c, nv50_software_mthd_dma_vblsem },
121 { 0x0400, nv50_software_mthd_vblsem_offset },
122 { 0x0404, nv50_software_mthd_vblsem_value },
123 { 0x0408, nv50_software_mthd_vblsem_release },
124 { 0x0500, nv50_software_mthd_flip },
125 {}
126};
127
128static struct nouveau_oclass
129nv50_software_sclass[] = {
130 { 0x506e, &nouveau_object_ofuncs, nv50_software_omthds },
131 {}
132};
133
134/*******************************************************************************
135 * software context
136 ******************************************************************************/
137
138static int
139nv50_software_context_ctor(struct nouveau_object *parent,
140 struct nouveau_object *engine,
141 struct nouveau_oclass *oclass, void *data, u32 size,
142 struct nouveau_object **pobject)
143{
144 struct nv50_software_chan *chan;
145 int ret;
146
147 ret = nouveau_software_context_create(parent, engine, oclass, &chan);
148 *pobject = nv_object(chan);
149 if (ret)
150 return ret;
151
152 chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
153 return 0;
154}
155
156static struct nouveau_oclass
157nv50_software_cclass = {
158 .handle = NV_ENGCTX(SW, 0x50),
159 .ofuncs = &(struct nouveau_ofuncs) {
160 .ctor = nv50_software_context_ctor,
161 .dtor = _nouveau_software_context_dtor,
162 .init = _nouveau_software_context_init,
163 .fini = _nouveau_software_context_fini,
164 },
165};
166
167/*******************************************************************************
168 * software engine/subdev functions
169 ******************************************************************************/
170
171static int
172nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
173 struct nouveau_oclass *oclass, void *data, u32 size,
174 struct nouveau_object **pobject)
175{
176 struct nv50_software_priv *priv;
177 int ret;
178
179 ret = nouveau_software_create(parent, engine, oclass, &priv);
180 *pobject = nv_object(priv);
181 if (ret)
182 return ret;
183
184 nv_engine(priv)->cclass = &nv50_software_cclass;
185 nv_engine(priv)->sclass = nv50_software_sclass;
186 nv_subdev(priv)->intr = nv04_software_intr;
187 return 0;
188}
189
190struct nouveau_oclass
191nv50_software_oclass = {
192 .handle = NV_ENGINE(SW, 0x50),
193 .ofuncs = &(struct nouveau_ofuncs) {
194 .ctor = nv50_software_ctor,
195 .dtor = _nouveau_software_dtor,
196 .init = _nouveau_software_init,
197 .fini = _nouveau_software_fini,
198 },
199};
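
For context, the four vblank-semaphore methods above are meant to arrive as a sequence from a channel's pushbuffer. A hedged sketch of that ordering, using nv_call() to stand in for each incoming method ("swobj" is a hypothetical 0x506e object, and the helper itself is illustrative, not part of this patch):

/* Hedged sketch of the NV50 software-object vblank method sequence:
 * bind the semaphore ctxdma, program offset and value, then request
 * release on the given CRTC's next vblank.
 */
static int
example_vblsem(struct nouveau_object *swobj, u32 dma_handle,
	       u32 sem_offset, u32 sem_value, u32 crtc)
{
	int ret;

	ret = nv_call(swobj, 0x018c, dma_handle);	  /* DMA_VBLSEM */
	if (ret == 0)
		ret = nv_call(swobj, 0x0400, sem_offset); /* VBLSEM_OFFSET */
	if (ret == 0)
		ret = nv_call(swobj, 0x0404, sem_value);  /* VBLSEM_VALUE */
	if (ret == 0)
		ret = nv_call(swobj, 0x0408, crtc);	  /* VBLSEM_RELEASE */
	return ret;
}
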
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
new file mode 100644
index 000000000000..b7b0d7e330d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -0,0 +1,181 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/software.h>
30#include <engine/disp.h>
31
32struct nvc0_software_priv {
33 struct nouveau_software base;
34};
35
36struct nvc0_software_chan {
37 struct nouveau_software_chan base;
38};
39
40/*******************************************************************************
41 * software object classes
42 ******************************************************************************/
43
44static int
45nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
46 void *args, u32 size)
47{
48 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
49 u64 data = *(u32 *)args;
50 if (mthd == 0x0400) {
51 chan->base.vblank.offset &= 0x00ffffffffULL;
52 chan->base.vblank.offset |= data << 32;
53 } else {
54 chan->base.vblank.offset &= 0xff00000000ULL;
55 chan->base.vblank.offset |= data;
56 }
57 return 0;
58}
59
60static int
61nvc0_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
62 void *args, u32 size)
63{
64 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
65 chan->base.vblank.value = *(u32 *)args;
66 return 0;
67}
68
69static int
70nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
71 void *args, u32 size)
72{
73 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
74 struct nouveau_disp *disp = nouveau_disp(object);
75 unsigned long flags;
76 u32 crtc = *(u32 *)args;
77
78 if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
79 return -EINVAL;
80
81 disp->vblank.get(disp->vblank.data, crtc);
82
83 spin_lock_irqsave(&disp->vblank.lock, flags);
84 list_add(&chan->base.vblank.head, &disp->vblank.list);
85 chan->base.vblank.crtc = crtc;
86 spin_unlock_irqrestore(&disp->vblank.lock, flags);
87 return 0;
88}
89
90static int
91nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
92 void *args, u32 size)
93{
94 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
95 if (chan->base.flip)
96 return chan->base.flip(chan->base.flip_data);
97 return -EINVAL;
98}
99
100static struct nouveau_omthds
101nvc0_software_omthds[] = {
102 { 0x0400, nvc0_software_mthd_vblsem_offset },
103 { 0x0404, nvc0_software_mthd_vblsem_offset },
104 { 0x0408, nvc0_software_mthd_vblsem_value },
105 { 0x040c, nvc0_software_mthd_vblsem_release },
106 { 0x0500, nvc0_software_mthd_flip },
107 {}
108};
109
110static struct nouveau_oclass
111nvc0_software_sclass[] = {
112 { 0x906e, &nouveau_object_ofuncs, nvc0_software_omthds },
113 {}
114};
115
116/*******************************************************************************
117 * software context
118 ******************************************************************************/
119
120static int
121nvc0_software_context_ctor(struct nouveau_object *parent,
122 struct nouveau_object *engine,
123 struct nouveau_oclass *oclass, void *data, u32 size,
124 struct nouveau_object **pobject)
125{
126 struct nvc0_software_chan *chan;
127 int ret;
128
129 ret = nouveau_software_context_create(parent, engine, oclass, &chan);
130 *pobject = nv_object(chan);
131 if (ret)
132 return ret;
133
134 chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
135 return 0;
136}
137
138static struct nouveau_oclass
139nvc0_software_cclass = {
140 .handle = NV_ENGCTX(SW, 0xc0),
141 .ofuncs = &(struct nouveau_ofuncs) {
142 .ctor = nvc0_software_context_ctor,
143 .dtor = _nouveau_software_context_dtor,
144 .init = _nouveau_software_context_init,
145 .fini = _nouveau_software_context_fini,
146 },
147};
148
149/*******************************************************************************
150 * software engine/subdev functions
151 ******************************************************************************/
152
153static int
154nvc0_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
155 struct nouveau_oclass *oclass, void *data, u32 size,
156 struct nouveau_object **pobject)
157{
158 struct nvc0_software_priv *priv;
159 int ret;
160
161 ret = nouveau_software_create(parent, engine, oclass, &priv);
162 *pobject = nv_object(priv);
163 if (ret)
164 return ret;
165
166 nv_engine(priv)->cclass = &nvc0_software_cclass;
167 nv_engine(priv)->sclass = nvc0_software_sclass;
168 nv_subdev(priv)->intr = nv04_software_intr;
169 return 0;
170}
171
172struct nouveau_oclass
173nvc0_software_oclass = {
174 .handle = NV_ENGINE(SW, 0xc0),
175 .ofuncs = &(struct nouveau_ofuncs) {
176 .ctor = nvc0_software_ctor,
177 .dtor = _nouveau_software_dtor,
178 .init = _nouveau_software_init,
179 .fini = _nouveau_software_fini,
180 },
181};
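
Unlike NV50, the Fermi-era object carries a 40-bit semaphore address, so the offset arrives as two method writes. A worked example of the reassembly done by nvc0_software_mthd_vblsem_offset():

/* Worked example of the 40-bit offset reassembly above:
 *   method 0x0400, data 0x00000012 -> offset = 0x12_00000000
 *   method 0x0404, data 0x34567000 -> offset = 0x12_34567000
 * 0x0400 replaces bits 39:32, 0x0404 replaces bits 31:0.
 */
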
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
new file mode 100644
index 000000000000..dd23c80e5405
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
@@ -0,0 +1,175 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h>
28
29#include <engine/vp.h>
30
31struct nv84_vp_priv {
32 struct nouveau_vp base;
33};
34
35struct nv84_vp_chan {
36 struct nouveau_vp_chan base;
37};
38
39/*******************************************************************************
40 * VP object classes
41 ******************************************************************************/
42
43static struct nouveau_oclass
44nv84_vp_sclass[] = {
45 {},
46};
47
48/*******************************************************************************
49 * PVP context
50 ******************************************************************************/
51
52static int
53nv84_vp_context_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct nv84_vp_chan *priv;
59 int ret;
60
61 ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
62 0, 0, 0, &priv);
63 *pobject = nv_object(priv);
64 if (ret)
65 return ret;
66
67 return 0;
68}
69
70static void
71nv84_vp_context_dtor(struct nouveau_object *object)
72{
73 struct nv84_vp_chan *priv = (void *)object;
74 nouveau_vp_context_destroy(&priv->base);
75}
76
77static int
78nv84_vp_context_init(struct nouveau_object *object)
79{
80 struct nv84_vp_chan *priv = (void *)object;
81 int ret;
82
83 ret = nouveau_vp_context_init(&priv->base);
84 if (ret)
85 return ret;
86
87 return 0;
88}
89
90static int
91nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
92{
93 struct nv84_vp_chan *priv = (void *)object;
94 return nouveau_vp_context_fini(&priv->base, suspend);
95}
96
97static struct nouveau_oclass
98nv84_vp_cclass = {
99 .handle = NV_ENGCTX(VP, 0x84),
100 .ofuncs = &(struct nouveau_ofuncs) {
101 .ctor = nv84_vp_context_ctor,
102 .dtor = nv84_vp_context_dtor,
103 .init = nv84_vp_context_init,
104 .fini = nv84_vp_context_fini,
105 .rd32 = _nouveau_vp_context_rd32,
106 .wr32 = _nouveau_vp_context_wr32,
107 },
108};
109
110/*******************************************************************************
111 * PVP engine/subdev functions
112 ******************************************************************************/
113
114static void
115nv84_vp_intr(struct nouveau_subdev *subdev)
116{
117}
118
119static int
120nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
121 struct nouveau_oclass *oclass, void *data, u32 size,
122 struct nouveau_object **pobject)
123{
124 struct nv84_vp_priv *priv;
125 int ret;
126
127 ret = nouveau_vp_create(parent, engine, oclass, &priv);
128 *pobject = nv_object(priv);
129 if (ret)
130 return ret;
131
132 nv_subdev(priv)->unit = 0x01020000;
133 nv_subdev(priv)->intr = nv84_vp_intr;
134 nv_engine(priv)->cclass = &nv84_vp_cclass;
135 nv_engine(priv)->sclass = nv84_vp_sclass;
136 return 0;
137}
138
139static void
140nv84_vp_dtor(struct nouveau_object *object)
141{
142 struct nv84_vp_priv *priv = (void *)object;
143 nouveau_vp_destroy(&priv->base);
144}
145
146static int
147nv84_vp_init(struct nouveau_object *object)
148{
149 struct nv84_vp_priv *priv = (void *)object;
150 int ret;
151
152 ret = nouveau_vp_init(&priv->base);
153 if (ret)
154 return ret;
155
156 return 0;
157}
158
159static int
160nv84_vp_fini(struct nouveau_object *object, bool suspend)
161{
162 struct nv84_vp_priv *priv = (void *)object;
163 return nouveau_vp_fini(&priv->base, suspend);
164}
165
166struct nouveau_oclass
167nv84_vp_oclass = {
168 .handle = NV_ENGINE(VP, 0x84),
169 .ofuncs = &(struct nouveau_ofuncs) {
170 .ctor = nv84_vp_ctor,
171 .dtor = nv84_vp_dtor,
172 .init = nv84_vp_init,
173 .fini = nv84_vp_fini,
174 },
175};
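
The unit mask set in the constructor above ties the engine into top-level interrupt routing. A hedged sketch of how a master-control handler might use it (the loop and register offset are illustrative, not this patch's MC code):

/* Hedged sketch: treat nv_subdev()->unit as a PMC interrupt mask and
 * route pending interrupts to every subdev whose bits are set in the
 * status register (0x000100 is PMC_INTR, assumed here).
 */
static void
example_mc_intr(struct nouveau_subdev *mc, struct nouveau_device *device)
{
	u32 stat = nv_rd32(mc, 0x000100);
	int i;

	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
		struct nouveau_subdev *unit = nouveau_subdev(device, i);
		if (unit && unit->intr && (stat & unit->unit))
			unit->intr(unit);
	}
}
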
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
new file mode 100644
index 000000000000..6180ae9800fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -0,0 +1,118 @@
1#ifndef __NOUVEAU_CLASS_H__
2#define __NOUVEAU_CLASS_H__
3
4/* Device class
5 *
6 * 0080: NV_DEVICE
7 */
8#define NV_DEVICE_CLASS 0x00000080
9
10#define NV_DEVICE_DISABLE_IDENTIFY 0x0000000000000001ULL
11#define NV_DEVICE_DISABLE_MMIO 0x0000000000000002ULL
12#define NV_DEVICE_DISABLE_VBIOS 0x0000000000000004ULL
13#define NV_DEVICE_DISABLE_CORE 0x0000000000000008ULL
14#define NV_DEVICE_DISABLE_DISP 0x0000000000010000ULL
15#define NV_DEVICE_DISABLE_FIFO 0x0000000000020000ULL
16#define NV_DEVICE_DISABLE_GRAPH 0x0000000100000000ULL
17#define NV_DEVICE_DISABLE_MPEG 0x0000000200000000ULL
18#define NV_DEVICE_DISABLE_ME 0x0000000400000000ULL
19#define NV_DEVICE_DISABLE_VP 0x0000000800000000ULL
20#define NV_DEVICE_DISABLE_CRYPT 0x0000001000000000ULL
21#define NV_DEVICE_DISABLE_BSP 0x0000002000000000ULL
22#define NV_DEVICE_DISABLE_PPP 0x0000004000000000ULL
23#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL
24#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL
25#define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL
26
27struct nv_device_class {
28 u64 device; /* device identifier, ~0 for client default */
29 u64 disable; /* disable particular subsystems */
30 u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */
31};
32
33/* DMA object classes
34 *
35 * 0002: NV_DMA_FROM_MEMORY
36 * 0003: NV_DMA_TO_MEMORY
37 * 003d: NV_DMA_IN_MEMORY
38 */
39#define NV_DMA_FROM_MEMORY_CLASS 0x00000002
40#define NV_DMA_TO_MEMORY_CLASS 0x00000003
41#define NV_DMA_IN_MEMORY_CLASS 0x0000003d
42
43#define NV_DMA_TARGET_MASK 0x000000ff
44#define NV_DMA_TARGET_VM 0x00000000
45#define NV_DMA_TARGET_VRAM 0x00000001
46#define NV_DMA_TARGET_PCI 0x00000002
47#define NV_DMA_TARGET_PCI_US 0x00000003
48#define NV_DMA_TARGET_AGP 0x00000004
49#define NV_DMA_ACCESS_MASK 0x00000f00
50#define NV_DMA_ACCESS_VM 0x00000000
51#define NV_DMA_ACCESS_RD 0x00000100
52#define NV_DMA_ACCESS_WR 0x00000200
53#define NV_DMA_ACCESS_RDWR 0x00000300
54
55struct nv_dma_class {
56 u32 flags;
57 u32 pad0;
58 u64 start;
59 u64 limit;
60};
61
62/* DMA FIFO channel classes
63 *
64 * 006b: NV03_CHANNEL_DMA
65 * 006e: NV10_CHANNEL_DMA
66 * 176e: NV17_CHANNEL_DMA
67 * 406e: NV40_CHANNEL_DMA
68 * 506e: NV50_CHANNEL_DMA
69 * 826e: NV84_CHANNEL_DMA
70 */
71#define NV03_CHANNEL_DMA_CLASS 0x0000006b
72#define NV10_CHANNEL_DMA_CLASS 0x0000006e
73#define NV17_CHANNEL_DMA_CLASS 0x0000176e
74#define NV40_CHANNEL_DMA_CLASS 0x0000406e
75#define NV50_CHANNEL_DMA_CLASS 0x0000506e
76#define NV84_CHANNEL_DMA_CLASS 0x0000826e
77
78struct nv03_channel_dma_class {
79 u32 pushbuf;
80 u32 pad0;
81 u64 offset;
82};
83
84/* Indirect FIFO channel classes
85 *
86 * 506f: NV50_CHANNEL_IND
87 * 826f: NV84_CHANNEL_IND
88 * 906f: NVC0_CHANNEL_IND
89 * a06f: NVE0_CHANNEL_IND
90 */
91
92#define NV50_CHANNEL_IND_CLASS 0x0000506f
93#define NV84_CHANNEL_IND_CLASS 0x0000826f
94#define NVC0_CHANNEL_IND_CLASS 0x0000906f
95#define NVE0_CHANNEL_IND_CLASS 0x0000a06f
96
97struct nv50_channel_ind_class {
98 u32 pushbuf;
99 u32 ilength;
100 u64 ioffset;
101};
102
103#define NVE0_CHANNEL_IND_ENGINE_GR 0x00000001
104#define NVE0_CHANNEL_IND_ENGINE_VP 0x00000002
105#define NVE0_CHANNEL_IND_ENGINE_PPP 0x00000004
106#define NVE0_CHANNEL_IND_ENGINE_BSP 0x00000008
107#define NVE0_CHANNEL_IND_ENGINE_CE0 0x00000010
108#define NVE0_CHANNEL_IND_ENGINE_CE1 0x00000020
109#define NVE0_CHANNEL_IND_ENGINE_ENC 0x00000040
110
111struct nve0_channel_ind_class {
112 u32 pushbuf;
113 u32 ilength;
114 u64 ioffset;
115 u32 engine;
116};
117
118#endif
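
These structures are the `data' blobs handed to object constructors. A hedged sketch of creating an NV50 DMA channel through nouveau_object_new() (declared in core/object.h below; every handle value here is an arbitrary, illustrative client-chosen name):

/* Hedged sketch: the argument struct travels as the (data, size)
 * pair of nouveau_object_new().
 */
static int
example_channel_new(struct nouveau_object *client,
		    struct nouveau_object **pchan)
{
	struct nv03_channel_dma_class args = {
		.pushbuf = 0xbeef0001,	/* handle of a prior DMA object */
		.offset  = 0,		/* initial pushbuffer offset */
	};

	return nouveau_object_new(client,
				  0xbeef0000, /* parent handle, illustrative */
				  0xbeef0002, /* handle for the new channel */
				  NV50_CHANNEL_DMA_CLASS,
				  &args, sizeof(args), pchan);
}
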
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
new file mode 100644
index 000000000000..0193532ceac9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -0,0 +1,42 @@
1#ifndef __NOUVEAU_CLIENT_H__
2#define __NOUVEAU_CLIENT_H__
3
4#include <core/namedb.h>
5
6struct nouveau_client {
7 struct nouveau_namedb base;
8 struct nouveau_handle *root;
9 struct nouveau_object *device;
10 char name[16];
11 u32 debug;
12 struct nouveau_vm *vm;
13};
14
15static inline struct nouveau_client *
16nv_client(void *obj)
17{
18#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
19 if (unlikely(!nv_iclass(obj, NV_CLIENT_CLASS)))
20 nv_assert("BAD CAST -> NvClient, %08x", nv_hclass(obj));
21#endif
22 return obj;
23}
24
25static inline struct nouveau_client *
26nouveau_client(void *obj)
27{
28 struct nouveau_object *client = nv_object(obj);
29 while (client && !(nv_iclass(client, NV_CLIENT_CLASS)))
30 client = client->parent;
31 return (void *)client;
32}
33
34#define nouveau_client_create(n,c,oc,od,d) \
35 nouveau_client_create_((n), (c), (oc), (od), sizeof(**d), (void **)d)
36
37int nouveau_client_create_(const char *name, u64 device, const char *cfg,
38 const char *dbg, int, void **);
39int nouveau_client_init(struct nouveau_client *);
40int nouveau_client_fini(struct nouveau_client *, bool suspend);
41
42#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/debug.h b/drivers/gpu/drm/nouveau/core/include/core/debug.h
new file mode 100644
index 000000000000..9ea18dfcb4d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/debug.h
@@ -0,0 +1,13 @@
1#ifndef __NOUVEAU_DEBUG_H__
2#define __NOUVEAU_DEBUG_H__
3
4#define NV_DBG_FATAL 0
5#define NV_DBG_ERROR 1
6#define NV_DBG_WARN 2
7#define NV_DBG_INFO 3
8#define NV_DBG_DEBUG 4
9#define NV_DBG_TRACE 5
10#define NV_DBG_PARANOIA 6
11#define NV_DBG_SPAM 7
12
13#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
new file mode 100644
index 000000000000..e58b6f0984c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -0,0 +1,136 @@
1#ifndef __NOUVEAU_DEVICE_H__
2#define __NOUVEAU_DEVICE_H__
3
4#include <core/object.h>
5#include <core/subdev.h>
6#include <core/engine.h>
7
8enum nv_subdev_type {
9 NVDEV_SUBDEV_DEVICE,
10 NVDEV_SUBDEV_VBIOS,
11
12 /* All subdevs from DEVINIT to DEVINIT_LAST will be created before
13 * *any* of them are initialised. This subdev category is used
14 * for any subdevs that the VBIOS init table parsing may call out
15 * to during POST.
16 */
17 NVDEV_SUBDEV_DEVINIT,
18 NVDEV_SUBDEV_GPIO,
19 NVDEV_SUBDEV_I2C,
20 NVDEV_SUBDEV_CLOCK,
21 NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_CLOCK,
22
	23	/* These subdevs are initialised immediately after they've been
	24	 * created, and may assume that any subdev listed above them
	25	 * exists and has been initialised.
	26	 */
27 NVDEV_SUBDEV_MXM,
28 NVDEV_SUBDEV_MC,
29 NVDEV_SUBDEV_TIMER,
30 NVDEV_SUBDEV_FB,
31 NVDEV_SUBDEV_LTCG,
32 NVDEV_SUBDEV_IBUS,
33 NVDEV_SUBDEV_INSTMEM,
34 NVDEV_SUBDEV_VM,
35 NVDEV_SUBDEV_BAR,
36 NVDEV_SUBDEV_VOLT,
37 NVDEV_SUBDEV_THERM,
38
39 NVDEV_ENGINE_DMAOBJ,
40 NVDEV_ENGINE_FIFO,
41 NVDEV_ENGINE_SW,
42 NVDEV_ENGINE_GR,
43 NVDEV_ENGINE_MPEG,
44 NVDEV_ENGINE_ME,
45 NVDEV_ENGINE_VP,
46 NVDEV_ENGINE_CRYPT,
47 NVDEV_ENGINE_BSP,
48 NVDEV_ENGINE_PPP,
49 NVDEV_ENGINE_COPY0,
50 NVDEV_ENGINE_COPY1,
51 NVDEV_ENGINE_UNK1C1,
52 NVDEV_ENGINE_VENC,
53 NVDEV_ENGINE_DISP,
54
55 NVDEV_SUBDEV_NR,
56};
57
58struct nouveau_device {
59 struct nouveau_subdev base;
60 struct list_head head;
61
62 struct pci_dev *pdev;
63 u64 handle;
64
65 const char *cfgopt;
66 const char *dbgopt;
67 const char *name;
68 const char *cname;
69
70 enum {
71 NV_04 = 0x04,
72 NV_10 = 0x10,
73 NV_20 = 0x20,
74 NV_30 = 0x30,
75 NV_40 = 0x40,
76 NV_50 = 0x50,
77 NV_C0 = 0xc0,
78 NV_D0 = 0xd0,
79 NV_E0 = 0xe0,
80 } card_type;
81 u32 chipset;
82 u32 crystal;
83
84 struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR];
85 struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
86};
87
88static inline struct nouveau_device *
89nv_device(void *obj)
90{
91 struct nouveau_object *object = nv_object(obj);
92 struct nouveau_object *device = object;
93
94 if (device->engine)
95 device = device->engine;
96 if (device->parent)
97 device = device->parent;
98
99#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
100 if (unlikely(!nv_iclass(device, NV_SUBDEV_CLASS) ||
101 (nv_hclass(device) & 0xff) != NVDEV_SUBDEV_DEVICE)) {
102 nv_assert("BAD CAST -> NvDevice, 0x%08x 0x%08x",
103 nv_hclass(object), nv_hclass(device));
104 }
105#endif
106
107 return (void *)device;
108}
109
110static inline struct nouveau_subdev *
111nouveau_subdev(void *obj, int sub)
112{
113 if (nv_device(obj)->subdev[sub])
114 return nv_subdev(nv_device(obj)->subdev[sub]);
115 return NULL;
116}
117
118static inline struct nouveau_engine *
119nouveau_engine(void *obj, int sub)
120{
121 struct nouveau_subdev *subdev = nouveau_subdev(obj, sub);
122 if (subdev && nv_iclass(subdev, NV_ENGINE_CLASS))
123 return nv_engine(subdev);
124 return NULL;
125}
126
127static inline bool
128nv_device_match(struct nouveau_object *object, u16 dev, u16 ven, u16 sub)
129{
130 struct nouveau_device *device = nv_device(object);
131 return device->pdev->device == dev &&
132 device->pdev->subsystem_vendor == ven &&
133 device->pdev->subsystem_device == sub;
134}
135
136#endif
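
The ordering contract spelled out in the enum's comments implies a two-phase bring-up: create every DEVINIT-group subdev before initialising any of them, then create-and-init the rest in list order. A hedged sketch of a loop honouring that contract (example_subdev_create() is hypothetical; the real device code lives elsewhere in this series):

/* Hedged sketch of the nv_subdev_type ordering contract. */
static int example_subdev_create(struct nouveau_device *, int); /* hypothetical */

static int
example_device_init(struct nouveau_device *device)
{
	int i, ret;

	/* phase 1: create the whole DEVINIT group, init nothing yet */
	for (i = NVDEV_SUBDEV_DEVINIT; i <= NVDEV_SUBDEV_DEVINIT_LAST; i++) {
		ret = example_subdev_create(device, i);
		if (ret)
			return ret;
	}

	/* phase 2: init the DEVINIT group, then create+init the rest */
	for (i = NVDEV_SUBDEV_DEVINIT; i < NVDEV_SUBDEV_NR; i++) {
		if (i > NVDEV_SUBDEV_DEVINIT_LAST) {
			ret = example_subdev_create(device, i);
			if (ret)
				return ret;
		}
		if (device->subdev[i]) {
			ret = nouveau_object_inc(device->subdev[i]);
			if (ret)
				return ret;
		}
	}
	return 0;
}
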
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engctx.h b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
new file mode 100644
index 000000000000..8a947b6872eb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
@@ -0,0 +1,51 @@
1#ifndef __NOUVEAU_ENGCTX_H__
2#define __NOUVEAU_ENGCTX_H__
3
4#include <core/object.h>
5#include <core/gpuobj.h>
6
7#include <subdev/vm.h>
8
9#define NV_ENGCTX_(eng,var) (NV_ENGCTX_CLASS | ((var) << 8) | (eng))
10#define NV_ENGCTX(name,var) NV_ENGCTX_(NVDEV_ENGINE_##name, (var))
11
12struct nouveau_engctx {
13 struct nouveau_gpuobj base;
14 struct nouveau_vma vma;
15 struct list_head head;
16 unsigned long save;
17 u64 addr;
18};
19
20static inline struct nouveau_engctx *
21nv_engctx(void *obj)
22{
23#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
24 if (unlikely(!nv_iclass(obj, NV_ENGCTX_CLASS)))
25 nv_assert("BAD CAST -> NvEngCtx, %08x", nv_hclass(obj));
26#endif
27 return obj;
28}
29
30#define nouveau_engctx_create(p,e,c,g,s,a,f,d) \
31 nouveau_engctx_create_((p), (e), (c), (g), (s), (a), (f), \
32 sizeof(**d), (void **)d)
33
34int nouveau_engctx_create_(struct nouveau_object *, struct nouveau_object *,
35 struct nouveau_oclass *, struct nouveau_object *,
36 u32 size, u32 align, u32 flags,
37 int length, void **data);
38void nouveau_engctx_destroy(struct nouveau_engctx *);
39int nouveau_engctx_init(struct nouveau_engctx *);
40int nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
41
42void _nouveau_engctx_dtor(struct nouveau_object *);
43int _nouveau_engctx_init(struct nouveau_object *);
44int _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
45#define _nouveau_engctx_rd32 _nouveau_gpuobj_rd32
46#define _nouveau_engctx_wr32 _nouveau_gpuobj_wr32
47
48struct nouveau_object *nouveau_engctx_get(struct nouveau_engine *, u64 addr);
49void nouveau_engctx_put(struct nouveau_object *);
50
51#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engine.h b/drivers/gpu/drm/nouveau/core/include/core/engine.h
new file mode 100644
index 000000000000..666d06de77ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/engine.h
@@ -0,0 +1,57 @@
1#ifndef __NOUVEAU_ENGINE_H__
2#define __NOUVEAU_ENGINE_H__
3
4#include <core/object.h>
5#include <core/subdev.h>
6
7#define NV_ENGINE_(eng,var) (NV_ENGINE_CLASS | ((var) << 8) | (eng))
8#define NV_ENGINE(name,var) NV_ENGINE_(NVDEV_ENGINE_##name, (var))
9
10struct nouveau_engine {
11 struct nouveau_subdev base;
12 struct nouveau_oclass *cclass;
13 struct nouveau_oclass *sclass;
14
15 struct list_head contexts;
16 spinlock_t lock;
17
18 void (*tile_prog)(struct nouveau_engine *, int region);
19 int (*tlb_flush)(struct nouveau_engine *);
20};
21
22static inline struct nouveau_engine *
23nv_engine(void *obj)
24{
25#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
26 if (unlikely(!nv_iclass(obj, NV_ENGINE_CLASS)))
27 nv_assert("BAD CAST -> NvEngine, %08x", nv_hclass(obj));
28#endif
29 return obj;
30}
31
32static inline int
33nv_engidx(struct nouveau_object *object)
34{
35 return nv_subidx(object);
36}
37
38#define nouveau_engine_create(p,e,c,d,i,f,r) \
39 nouveau_engine_create_((p), (e), (c), (d), (i), (f), \
40 sizeof(**r),(void **)r)
41
42#define nouveau_engine_destroy(p) \
43 nouveau_subdev_destroy(&(p)->base)
44#define nouveau_engine_init(p) \
45 nouveau_subdev_init(&(p)->base)
46#define nouveau_engine_fini(p,s) \
47 nouveau_subdev_fini(&(p)->base, (s))
48
49int nouveau_engine_create_(struct nouveau_object *, struct nouveau_object *,
50 struct nouveau_oclass *, bool, const char *,
51 const char *, int, void **);
52
53#define _nouveau_engine_dtor _nouveau_subdev_dtor
54#define _nouveau_engine_init _nouveau_subdev_init
55#define _nouveau_engine_fini _nouveau_subdev_fini
56
57#endif
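
The handle layout behind NV_ENGINE()/NV_SUBDEV()/NV_ENGCTX() is worth expanding once, since every nv_iclass() test in this series depends on it. A worked expansion:

/* Worked expansion of the handle encoding:
 *
 *   NV_ENGINE(SW, 0x10)
 *     = NV_ENGINE_CLASS | (0x10 << 8) | NVDEV_ENGINE_SW
 *     = 0x08000000      | 0x00001000  | 0x13
 *     = 0x08001013
 *
 * The high bits identify the object kind (engine, subdev, engctx,
 * ...), the middle byte the per-kind variant, and the low byte the
 * nv_subdev_type index recovered by nv_subidx()/nv_engidx().
 */
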
diff --git a/drivers/gpu/drm/nouveau/core/include/core/enum.h b/drivers/gpu/drm/nouveau/core/include/core/enum.h
new file mode 100644
index 000000000000..e7b1e181943b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/enum.h
@@ -0,0 +1,23 @@
1#ifndef __NOUVEAU_ENUM_H__
2#define __NOUVEAU_ENUM_H__
3
4struct nouveau_enum {
5 u32 value;
6 const char *name;
7 const void *data;
8};
9
10const struct nouveau_enum *
11nouveau_enum_find(const struct nouveau_enum *, u32 value);
12
13void
14nouveau_enum_print(const struct nouveau_enum *en, u32 value);
15
16struct nouveau_bitfield {
17 u32 mask;
18 const char *name;
19};
20
21void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
22
23#endif
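
A hedged sketch of how these lookup tables are used (the table contents are invented; real tables live alongside each engine's interrupt handler):

/* Hedged sketch: a status-decode table and its lookup. */
static const struct nouveau_enum example_status[] = {
	{ 0x00000001, "IDLE",  NULL },
	{ 0x00000002, "FAULT", NULL },
	{}
};

static void
example_decode(u32 value)
{
	/* prints the matching name, or the raw value if unmatched
	 * (sketch assumption about the print helper's fallback)
	 */
	nouveau_enum_print(example_status, value);
}
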
diff --git a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
new file mode 100644
index 000000000000..6eaff79377ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
@@ -0,0 +1,71 @@
1#ifndef __NOUVEAU_GPUOBJ_H__
2#define __NOUVEAU_GPUOBJ_H__
3
4#include <core/object.h>
5#include <core/device.h>
6#include <core/parent.h>
7#include <core/mm.h>
8
9struct nouveau_vma;
10struct nouveau_vm;
11
12#define NVOBJ_FLAG_ZERO_ALLOC 0x00000001
13#define NVOBJ_FLAG_ZERO_FREE 0x00000002
14#define NVOBJ_FLAG_HEAP 0x00000004
15
16struct nouveau_gpuobj {
17 struct nouveau_object base;
18 struct nouveau_object *parent;
19 struct nouveau_mm_node *node;
20 struct nouveau_mm heap;
21
22 u32 flags;
23 u64 addr;
24 u32 size;
25};
26
27static inline struct nouveau_gpuobj *
28nv_gpuobj(void *obj)
29{
30#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
31 if (unlikely(!nv_iclass(obj, NV_GPUOBJ_CLASS)))
32 nv_assert("BAD CAST -> NvGpuObj, %08x", nv_hclass(obj));
33#endif
34 return obj;
35}
36
37#define nouveau_gpuobj_create(p,e,c,v,g,s,a,f,d) \
38 nouveau_gpuobj_create_((p), (e), (c), (v), (g), (s), (a), (f), \
39 sizeof(**d), (void **)d)
40#define nouveau_gpuobj_init(p) nouveau_object_init(&(p)->base)
41#define nouveau_gpuobj_fini(p,s) nouveau_object_fini(&(p)->base, (s))
42int nouveau_gpuobj_create_(struct nouveau_object *, struct nouveau_object *,
43 struct nouveau_oclass *, u32 pclass,
44 struct nouveau_object *, u32 size, u32 align,
45 u32 flags, int length, void **);
46void nouveau_gpuobj_destroy(struct nouveau_gpuobj *);
47
48int nouveau_gpuobj_new(struct nouveau_object *, struct nouveau_object *,
49 u32 size, u32 align, u32 flags,
50 struct nouveau_gpuobj **);
51int nouveau_gpuobj_dup(struct nouveau_object *, struct nouveau_gpuobj *,
52 struct nouveau_gpuobj **);
53
54int nouveau_gpuobj_map(struct nouveau_gpuobj *, u32 acc, struct nouveau_vma *);
55int nouveau_gpuobj_map_vm(struct nouveau_gpuobj *, struct nouveau_vm *,
56 u32 access, struct nouveau_vma *);
57void nouveau_gpuobj_unmap(struct nouveau_vma *);
58
59static inline void
60nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
61{
62 nouveau_object_ref(&obj->base, (struct nouveau_object **)ref);
63}
64
65void _nouveau_gpuobj_dtor(struct nouveau_object *);
66int _nouveau_gpuobj_init(struct nouveau_object *);
67int _nouveau_gpuobj_fini(struct nouveau_object *, bool);
68u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u32);
69void _nouveau_gpuobj_wr32(struct nouveau_object *, u32, u32);
70
71#endif
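
A hedged sketch of the allocation pattern these helpers support (the parent object, size and alignment are illustrative):

/* Hedged sketch: allocate a 4KiB, 256-byte-aligned, zeroed gpuobj,
 * write one word through its ofuncs, then drop the reference.
 */
static int
example_gpuobj(struct nouveau_object *parent)
{
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x100,
				 NVOBJ_FLAG_ZERO_ALLOC, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x000, 0xcafe0001); /* offset relative to the object */
	nouveau_gpuobj_ref(NULL, &obj);	 /* release */
	return 0;
}
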
diff --git a/drivers/gpu/drm/nouveau/core/include/core/handle.h b/drivers/gpu/drm/nouveau/core/include/core/handle.h
new file mode 100644
index 000000000000..363674cdf8ab
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/handle.h
@@ -0,0 +1,31 @@
1#ifndef __NOUVEAU_HANDLE_H__
2#define __NOUVEAU_HANDLE_H__
3
4struct nouveau_handle {
5 struct nouveau_namedb *namedb;
6 struct list_head node;
7
8 struct list_head head;
9 struct list_head tree;
10 u32 name;
11 u32 priv;
12
13 struct nouveau_handle *parent;
14 struct nouveau_object *object;
15};
16
17int nouveau_handle_create(struct nouveau_object *, u32 parent, u32 handle,
18 struct nouveau_object *, struct nouveau_handle **);
19void nouveau_handle_destroy(struct nouveau_handle *);
20int nouveau_handle_init(struct nouveau_handle *);
21int nouveau_handle_fini(struct nouveau_handle *, bool suspend);
22
23struct nouveau_object *
24nouveau_handle_ref(struct nouveau_object *, u32 name);
25
26struct nouveau_handle *nouveau_handle_get_class(struct nouveau_object *, u16);
27struct nouveau_handle *nouveau_handle_get_vinst(struct nouveau_object *, u64);
28struct nouveau_handle *nouveau_handle_get_cinst(struct nouveau_object *, u32);
29void nouveau_handle_put(struct nouveau_handle *);
30
31#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/math.h b/drivers/gpu/drm/nouveau/core/include/core/math.h
new file mode 100644
index 000000000000..f808131c5cd8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/math.h
@@ -0,0 +1,16 @@
1#ifndef __NOUVEAU_MATH_H__
2#define __NOUVEAU_MATH_H__
3
4static inline int
5log2i(u64 base)
6{
7 u64 temp = base >> 1;
8 int log2;
9
10 for (log2 = 0; temp; log2++, temp >>= 1) {
11 }
12
	13	return (base & (base - 1)) ? log2 + 1 : log2;
14}
15
16#endif
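
log2i() is a ceiling log2; a few worked values make the rounding rule concrete:

/* Worked examples for log2i():
 *   log2i(0x1000) -> 12  (power of two: base & (base - 1) == 0)
 *   log2i(0x1001) -> 13  (not a power of two: rounded up)
 *   log2i(1)      -> 0
 */
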
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
new file mode 100644
index 000000000000..9ee9bf4028ca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -0,0 +1,33 @@
1#ifndef __NOUVEAU_MM_H__
2#define __NOUVEAU_MM_H__
3
4struct nouveau_mm_node {
5 struct list_head nl_entry;
6 struct list_head fl_entry;
7 struct list_head rl_entry;
8
9 u8 type;
10 u32 offset;
11 u32 length;
12};
13
14struct nouveau_mm {
15 struct list_head nodes;
16 struct list_head free;
17
18 struct mutex mutex;
19
20 u32 block_size;
21 int heap_nodes;
22 u32 heap_size;
23};
24
25int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
26int nouveau_mm_fini(struct nouveau_mm *);
27int nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
28 u32 align, struct nouveau_mm_node **);
29int nouveau_mm_tail(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
30 u32 align, struct nouveau_mm_node **);
31void nouveau_mm_free(struct nouveau_mm *, struct nouveau_mm_node **);
32
33#endif
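
A hedged sketch of the intended call pattern (the range is illustrative, and block size 1 is chosen so the units are unambiguous):

/* Hedged sketch: carve a 16-unit node from the bottom of a managed
 * range, then release it and tear the heap down.
 */
static int
example_mm(void)
{
	struct nouveau_mm mm;
	struct nouveau_mm_node *node = NULL;
	int ret;

	ret = nouveau_mm_init(&mm, 0, 0x4000, 1);
	if (ret)
		return ret;

	ret = nouveau_mm_head(&mm, 1 /* type */, 16, 16, 1, &node);
	if (ret == 0)
		nouveau_mm_free(&mm, &node);

	return nouveau_mm_fini(&mm);
}
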
diff --git a/drivers/gpu/drm/nouveau/core/include/core/namedb.h b/drivers/gpu/drm/nouveau/core/include/core/namedb.h
new file mode 100644
index 000000000000..8897e0886085
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/namedb.h
@@ -0,0 +1,56 @@
1#ifndef __NOUVEAU_NAMEDB_H__
2#define __NOUVEAU_NAMEDB_H__
3
4#include <core/parent.h>
5
6struct nouveau_handle;
7
8struct nouveau_namedb {
9 struct nouveau_parent base;
10 rwlock_t lock;
11 struct list_head list;
12};
13
14static inline struct nouveau_namedb *
15nv_namedb(void *obj)
16{
17#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
18 if (unlikely(!nv_iclass(obj, NV_NAMEDB_CLASS)))
19 nv_assert("BAD CAST -> NvNameDB, %08x", nv_hclass(obj));
20#endif
21 return obj;
22}
23
24#define nouveau_namedb_create(p,e,c,v,s,m,d) \
25 nouveau_namedb_create_((p), (e), (c), (v), (s), (m), \
26 sizeof(**d), (void **)d)
27#define nouveau_namedb_init(p) \
28 nouveau_parent_init(&(p)->base)
29#define nouveau_namedb_fini(p,s) \
30 nouveau_parent_fini(&(p)->base, (s))
31#define nouveau_namedb_destroy(p) \
32 nouveau_parent_destroy(&(p)->base)
33
34int nouveau_namedb_create_(struct nouveau_object *, struct nouveau_object *,
35 struct nouveau_oclass *, u32 pclass,
36 struct nouveau_oclass *, u32 engcls,
37 int size, void **);
38
39int _nouveau_namedb_ctor(struct nouveau_object *, struct nouveau_object *,
40 struct nouveau_oclass *, void *, u32,
41 struct nouveau_object **);
42#define _nouveau_namedb_dtor _nouveau_parent_dtor
43#define _nouveau_namedb_init _nouveau_parent_init
44#define _nouveau_namedb_fini _nouveau_parent_fini
45
46int nouveau_namedb_insert(struct nouveau_namedb *, u32 name,
47 struct nouveau_object *, struct nouveau_handle *);
48void nouveau_namedb_remove(struct nouveau_handle *);
49
50struct nouveau_handle *nouveau_namedb_get(struct nouveau_namedb *, u32);
51struct nouveau_handle *nouveau_namedb_get_class(struct nouveau_namedb *, u16);
52struct nouveau_handle *nouveau_namedb_get_vinst(struct nouveau_namedb *, u64);
53struct nouveau_handle *nouveau_namedb_get_cinst(struct nouveau_namedb *, u32);
54void nouveau_namedb_put(struct nouveau_handle *);
55
56#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
new file mode 100644
index 000000000000..818feabbf4a0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -0,0 +1,188 @@
1#ifndef __NOUVEAU_OBJECT_H__
2#define __NOUVEAU_OBJECT_H__
3
4#include <core/os.h>
5#include <core/printk.h>
6
7#define NV_PARENT_CLASS 0x80000000
8#define NV_NAMEDB_CLASS 0x40000000
9#define NV_CLIENT_CLASS 0x20000000
10#define NV_SUBDEV_CLASS 0x10000000
11#define NV_ENGINE_CLASS 0x08000000
12#define NV_MEMOBJ_CLASS 0x04000000
13#define NV_GPUOBJ_CLASS 0x02000000
14#define NV_ENGCTX_CLASS 0x01000000
15#define NV_OBJECT_CLASS 0x0000ffff
16
17struct nouveau_object {
18 struct nouveau_oclass *oclass;
19 struct nouveau_object *parent;
20 struct nouveau_object *engine;
21 atomic_t refcount;
22 atomic_t usecount;
23#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
24#define NOUVEAU_OBJECT_MAGIC 0x75ef0bad
25 struct list_head list;
26 u32 _magic;
27#endif
28};
29
30static inline struct nouveau_object *
31nv_object(void *obj)
32{
33#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
34 if (likely(obj)) {
35 struct nouveau_object *object = obj;
36 if (unlikely(object->_magic != NOUVEAU_OBJECT_MAGIC))
37 nv_assert("BAD CAST -> NvObject, invalid magic");
38 }
39#endif
40 return obj;
41}
42
43#define nouveau_object_create(p,e,c,s,d) \
44 nouveau_object_create_((p), (e), (c), (s), sizeof(**d), (void **)d)
45int nouveau_object_create_(struct nouveau_object *, struct nouveau_object *,
46 struct nouveau_oclass *, u32, int size, void **);
47void nouveau_object_destroy(struct nouveau_object *);
48int nouveau_object_init(struct nouveau_object *);
49int nouveau_object_fini(struct nouveau_object *, bool suspend);
50
51extern struct nouveau_ofuncs nouveau_object_ofuncs;
52
53struct nouveau_oclass {
54 u32 handle;
55 struct nouveau_ofuncs *ofuncs;
56 struct nouveau_omthds *omthds;
57};
58
59#define nv_oclass(o) nv_object(o)->oclass
60#define nv_hclass(o) nv_oclass(o)->handle
61#define nv_iclass(o,i) (nv_hclass(o) & (i))
62#define nv_mclass(o) nv_iclass(o, NV_OBJECT_CLASS)
63
64static inline struct nouveau_object *
65nv_pclass(struct nouveau_object *parent, u32 oclass)
66{
67 while (parent && !nv_iclass(parent, oclass))
68 parent = parent->parent;
69 return parent;
70}
71
72struct nouveau_omthds {
73 u32 method;
74 int (*call)(struct nouveau_object *, u32, void *, u32);
75};
76
77struct nouveau_ofuncs {
78 int (*ctor)(struct nouveau_object *, struct nouveau_object *,
79 struct nouveau_oclass *, void *data, u32 size,
80 struct nouveau_object **);
81 void (*dtor)(struct nouveau_object *);
82 int (*init)(struct nouveau_object *);
83 int (*fini)(struct nouveau_object *, bool suspend);
84 u8 (*rd08)(struct nouveau_object *, u32 offset);
85 u16 (*rd16)(struct nouveau_object *, u32 offset);
86 u32 (*rd32)(struct nouveau_object *, u32 offset);
87 void (*wr08)(struct nouveau_object *, u32 offset, u8 data);
88 void (*wr16)(struct nouveau_object *, u32 offset, u16 data);
89 void (*wr32)(struct nouveau_object *, u32 offset, u32 data);
90};
91
92static inline struct nouveau_ofuncs *
93nv_ofuncs(void *obj)
94{
95 return nv_oclass(obj)->ofuncs;
96}
97
98int nouveau_object_ctor(struct nouveau_object *, struct nouveau_object *,
99 struct nouveau_oclass *, void *, u32,
100 struct nouveau_object **);
101void nouveau_object_ref(struct nouveau_object *, struct nouveau_object **);
102int nouveau_object_inc(struct nouveau_object *);
103int nouveau_object_dec(struct nouveau_object *, bool suspend);
104
105int nouveau_object_new(struct nouveau_object *, u32 parent, u32 handle,
106 u16 oclass, void *data, u32 size,
107 struct nouveau_object **);
108int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
109void nouveau_object_debug(void);
110
111static inline int
112nv_call(void *obj, u32 mthd, u32 data)
113{
114 struct nouveau_omthds *method = nv_oclass(obj)->omthds;
115
116 while (method && method->call) {
117 if (method->method == mthd)
118 return method->call(obj, mthd, &data, sizeof(data));
119 method++;
120 }
121
122 return -EINVAL;
123}
124
125static inline u8
126nv_ro08(void *obj, u32 addr)
127{
128 u8 data = nv_ofuncs(obj)->rd08(obj, addr);
129 nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
130 return data;
131}
132
133static inline u16
134nv_ro16(void *obj, u32 addr)
135{
136 u16 data = nv_ofuncs(obj)->rd16(obj, addr);
137 nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
138 return data;
139}
140
141static inline u32
142nv_ro32(void *obj, u32 addr)
143{
144 u32 data = nv_ofuncs(obj)->rd32(obj, addr);
145 nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
146 return data;
147}
148
149static inline void
150nv_wo08(void *obj, u32 addr, u8 data)
151{
152 nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
153 nv_ofuncs(obj)->wr08(obj, addr, data);
154}
155
156static inline void
157nv_wo16(void *obj, u32 addr, u16 data)
158{
159 nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
160 nv_ofuncs(obj)->wr16(obj, addr, data);
161}
162
163static inline void
164nv_wo32(void *obj, u32 addr, u32 data)
165{
166 nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
167 nv_ofuncs(obj)->wr32(obj, addr, data);
168}
169
170static inline u32
171nv_mo32(void *obj, u32 addr, u32 mask, u32 data)
172{
173 u32 temp = nv_ro32(obj, addr);
174 nv_wo32(obj, addr, (temp & ~mask) | data);
175 return temp;
176}
177
178static inline bool
179nv_strncmp(void *obj, u32 addr, u32 len, const char *str)
180{
181 while (len--) {
182 if (nv_ro08(obj, addr++) != *(str++))
183 return false;
184 }
185 return true;
186}
187
188#endif
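
Note the split between the accessor families: the nv_ro*/nv_wo* helpers here go through an object's ofuncs (e.g. into a gpuobj's backing store), while the nv_rd*/nv_wr* helpers in core/subdev.h hit a subdev's MMIO window. A short contrast:

/* Accessor families, side by side:
 *
 *   nv_rd32(subdev, 0x000100);  - MMIO read via subdev->mmio
 *                                 (core/subdev.h)
 *   nv_ro32(gpuobj, 0x10);      - backing-store read via the
 *                                 object's ofuncs->rd32
 *
 * nv_mo32() composes nv_ro32()/nv_wo32() into a read-modify-write
 * that returns the pre-modification word, mirroring nv_mask() on
 * the MMIO side.
 */
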
diff --git a/drivers/gpu/drm/nouveau/core/include/core/option.h b/drivers/gpu/drm/nouveau/core/include/core/option.h
new file mode 100644
index 000000000000..27074957fd21
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/option.h
@@ -0,0 +1,11 @@
1#ifndef __NOUVEAU_OPTION_H__
2#define __NOUVEAU_OPTION_H__
3
4#include <core/os.h>
5
6const char *nouveau_stropt(const char *optstr, const char *opt, int *len);
7bool nouveau_boolopt(const char *optstr, const char *opt, bool value);
8
9int nouveau_dbgopt(const char *optstr, const char *sub);
10
11#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
new file mode 100644
index 000000000000..d3aa251a5eb6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -0,0 +1,64 @@
1#ifndef __NOUVEAU_PARENT_H__
2#define __NOUVEAU_PARENT_H__
3
4#include <core/device.h>
5#include <core/object.h>
6
7struct nouveau_sclass {
8 struct nouveau_sclass *sclass;
9 struct nouveau_engine *engine;
10 struct nouveau_oclass *oclass;
11};
12
13struct nouveau_parent {
14 struct nouveau_object base;
15
16 struct nouveau_sclass *sclass;
17 u32 engine;
18
19 int (*context_attach)(struct nouveau_object *,
20 struct nouveau_object *);
21 int (*context_detach)(struct nouveau_object *, bool suspend,
22 struct nouveau_object *);
23
24 int (*object_attach)(struct nouveau_object *parent,
25 struct nouveau_object *object, u32 name);
26 void (*object_detach)(struct nouveau_object *parent, int cookie);
27};
28
29static inline struct nouveau_parent *
30nv_parent(void *obj)
31{
32#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
33 if (unlikely(!(nv_iclass(obj, NV_PARENT_CLASS))))
34 nv_assert("BAD CAST -> NvParent, %08x", nv_hclass(obj));
35#endif
36 return obj;
37}
38
39#define nouveau_parent_create(p,e,c,v,s,m,d) \
40 nouveau_parent_create_((p), (e), (c), (v), (s), (m), \
41 sizeof(**d), (void **)d)
42#define nouveau_parent_init(p) \
43 nouveau_object_init(&(p)->base)
44#define nouveau_parent_fini(p,s) \
45 nouveau_object_fini(&(p)->base, (s))
46
47int nouveau_parent_create_(struct nouveau_object *, struct nouveau_object *,
48 struct nouveau_oclass *, u32 pclass,
49 struct nouveau_oclass *, u64 engcls,
50 int size, void **);
51void nouveau_parent_destroy(struct nouveau_parent *);
52
53int _nouveau_parent_ctor(struct nouveau_object *, struct nouveau_object *,
54 struct nouveau_oclass *, void *, u32,
55 struct nouveau_object **);
56void _nouveau_parent_dtor(struct nouveau_object *);
57#define _nouveau_parent_init _nouveau_object_init
58#define _nouveau_parent_fini _nouveau_object_fini
59
60int nouveau_parent_sclass(struct nouveau_object *, u16 handle,
61 struct nouveau_object **pengine,
62 struct nouveau_oclass **poclass);
63
64#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
new file mode 100644
index 000000000000..1d629664f32d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -0,0 +1,39 @@
1#ifndef __NOUVEAU_PRINTK_H__
2#define __NOUVEAU_PRINTK_H__
3
4#include <core/os.h>
5#include <core/debug.h>
6
7struct nouveau_object;
8
9#define NV_PRINTK_FATAL KERN_CRIT
10#define NV_PRINTK_ERROR KERN_ERR
11#define NV_PRINTK_WARN KERN_WARNING
12#define NV_PRINTK_INFO KERN_INFO
13#define NV_PRINTK_DEBUG KERN_DEBUG
14#define NV_PRINTK_PARANOIA KERN_DEBUG
15#define NV_PRINTK_TRACE KERN_DEBUG
16#define NV_PRINTK_SPAM KERN_DEBUG
17
18void nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
19
20#define nv_printk(o,l,f,a...) do { \
21 if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \
22 nv_printk_(nv_object(o), NV_PRINTK_##l, NV_DBG_##l, f, ##a); \
 23 } while (0)
24
25#define nv_fatal(o,f,a...) nv_printk((o), FATAL, f, ##a)
26#define nv_error(o,f,a...) nv_printk((o), ERROR, f, ##a)
27#define nv_warn(o,f,a...) nv_printk((o), WARN, f, ##a)
28#define nv_info(o,f,a...) nv_printk((o), INFO, f, ##a)
29#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a)
30#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
31#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
32
33#define nv_assert(f,a...) do { \
34 if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
35 nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a); \
36 BUG_ON(1); \
 37 } while (0)
38
39#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/ramht.h b/drivers/gpu/drm/nouveau/core/include/core/ramht.h
new file mode 100644
index 000000000000..47e4cacbca37
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/ramht.h
@@ -0,0 +1,23 @@
1#ifndef __NOUVEAU_RAMHT_H__
2#define __NOUVEAU_RAMHT_H__
3
4#include <core/gpuobj.h>
5
6struct nouveau_ramht {
7 struct nouveau_gpuobj base;
8 int bits;
9};
10
11int nouveau_ramht_insert(struct nouveau_ramht *, int chid,
12 u32 handle, u32 context);
13void nouveau_ramht_remove(struct nouveau_ramht *, int cookie);
14int nouveau_ramht_new(struct nouveau_object *, struct nouveau_object *,
15 u32 size, u32 align, struct nouveau_ramht **);
16
17static inline void
18nouveau_ramht_ref(struct nouveau_ramht *obj, struct nouveau_ramht **ref)
19{
20 nouveau_gpuobj_ref(&obj->base, (struct nouveau_gpuobj **)ref);
21}
22
23#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/subdev.h b/drivers/gpu/drm/nouveau/core/include/core/subdev.h
new file mode 100644
index 000000000000..e9632e931616
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/subdev.h
@@ -0,0 +1,118 @@
1#ifndef __NOUVEAU_SUBDEV_H__
2#define __NOUVEAU_SUBDEV_H__
3
4#include <core/object.h>
5
6#define NV_SUBDEV_(sub,var) (NV_SUBDEV_CLASS | ((var) << 8) | (sub))
7#define NV_SUBDEV(name,var) NV_SUBDEV_(NVDEV_SUBDEV_##name, (var))
8
9struct nouveau_subdev {
10 struct nouveau_object base;
11 struct mutex mutex;
12 const char *name;
13 void __iomem *mmio;
14 u32 debug;
15 u32 unit;
16
17 void (*intr)(struct nouveau_subdev *);
18};
19
20static inline struct nouveau_subdev *
21nv_subdev(void *obj)
22{
23#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
24 if (unlikely(!nv_iclass(obj, NV_SUBDEV_CLASS)))
25 nv_assert("BAD CAST -> NvSubDev, %08x", nv_hclass(obj));
26#endif
27 return obj;
28}
29
30static inline int
31nv_subidx(struct nouveau_object *object)
32{
33 return nv_hclass(nv_subdev(object)) & 0xff;
34}
35
36#define nouveau_subdev_create(p,e,o,v,s,f,d) \
37 nouveau_subdev_create_((p), (e), (o), (v), (s), (f), \
38 sizeof(**d),(void **)d)
39
40int nouveau_subdev_create_(struct nouveau_object *, struct nouveau_object *,
41 struct nouveau_oclass *, u32 pclass,
42 const char *sname, const char *fname,
43 int size, void **);
44void nouveau_subdev_destroy(struct nouveau_subdev *);
45int nouveau_subdev_init(struct nouveau_subdev *);
46int nouveau_subdev_fini(struct nouveau_subdev *, bool suspend);
47void nouveau_subdev_reset(struct nouveau_object *);
48
49void _nouveau_subdev_dtor(struct nouveau_object *);
50int _nouveau_subdev_init(struct nouveau_object *);
51int _nouveau_subdev_fini(struct nouveau_object *, bool suspend);
52
 53 #define s_printk(s,l,f,a...) do { \
 54 	if ((s)->debug >= NV_DBG_##l) { \
 55 		nv_printk((s)->base.parent, l, f, ##a); \
 56 	} \
 57 } while (0)
58
59static inline u8
60nv_rd08(void *obj, u32 addr)
61{
62 struct nouveau_subdev *subdev = nv_subdev(obj);
63 u8 data = ioread8(subdev->mmio + addr);
64 nv_spam(subdev, "nv_rd08 0x%06x 0x%02x\n", addr, data);
65 return data;
66}
67
68static inline u16
69nv_rd16(void *obj, u32 addr)
70{
71 struct nouveau_subdev *subdev = nv_subdev(obj);
72 u16 data = ioread16_native(subdev->mmio + addr);
73 nv_spam(subdev, "nv_rd16 0x%06x 0x%04x\n", addr, data);
74 return data;
75}
76
77static inline u32
78nv_rd32(void *obj, u32 addr)
79{
80 struct nouveau_subdev *subdev = nv_subdev(obj);
81 u32 data = ioread32_native(subdev->mmio + addr);
82 nv_spam(subdev, "nv_rd32 0x%06x 0x%08x\n", addr, data);
83 return data;
84}
85
86static inline void
87nv_wr08(void *obj, u32 addr, u8 data)
88{
89 struct nouveau_subdev *subdev = nv_subdev(obj);
90 nv_spam(subdev, "nv_wr08 0x%06x 0x%02x\n", addr, data);
91 iowrite8(data, subdev->mmio + addr);
92}
93
94static inline void
95nv_wr16(void *obj, u32 addr, u16 data)
96{
97 struct nouveau_subdev *subdev = nv_subdev(obj);
98 nv_spam(subdev, "nv_wr16 0x%06x 0x%04x\n", addr, data);
99 iowrite16_native(data, subdev->mmio + addr);
100}
101
102static inline void
103nv_wr32(void *obj, u32 addr, u32 data)
104{
105 struct nouveau_subdev *subdev = nv_subdev(obj);
106 nv_spam(subdev, "nv_wr32 0x%06x 0x%08x\n", addr, data);
107 iowrite32_native(data, subdev->mmio + addr);
108}
109
110static inline u32
111nv_mask(void *obj, u32 addr, u32 mask, u32 data)
112{
113 u32 temp = nv_rd32(obj, addr);
114 nv_wr32(obj, addr, (temp & ~mask) | data);
115 return temp;
116}
117
118#endif
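
nv_mask() returning the pre-write value makes "flip a field, then restore the old setting" sequences one-liners. A hedged micro-example (the register and bit are invented):

/* Hedged micro-example: clear an invented enable bit, do work with
 * the unit disabled, then restore whatever was there before.
 */
static void
example_mask(struct nouveau_subdev *subdev)
{
	u32 save = nv_mask(subdev, 0x00b000, 0x00000001, 0x00000000);
	/* ... critical section ... */
	nv_wr32(subdev, 0x00b000, save);
}
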
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
new file mode 100644
index 000000000000..75d1ed5f85fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
@@ -0,0 +1,45 @@
1#ifndef __NOUVEAU_BSP_H__
2#define __NOUVEAU_BSP_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_bsp_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_bsp_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_bsp_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_bsp_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
21#define _nouveau_bsp_context_init _nouveau_engctx_init
22#define _nouveau_bsp_context_fini _nouveau_engctx_fini
23#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_bsp {
27 struct nouveau_engine base;
28};
29
30#define nouveau_bsp_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
32#define nouveau_bsp_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_bsp_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_bsp_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_bsp_dtor _nouveau_engine_dtor
40#define _nouveau_bsp_init _nouveau_engine_init
41#define _nouveau_bsp_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv84_bsp_oclass;
44
45#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
new file mode 100644
index 000000000000..70b9d8c5fcf5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
@@ -0,0 +1,49 @@
1#ifndef __NOUVEAU_COPY_H__
2#define __NOUVEAU_COPY_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_copy_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_copy_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_copy_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_copy_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_copy_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
21#define _nouveau_copy_context_init _nouveau_engctx_init
22#define _nouveau_copy_context_fini _nouveau_engctx_fini
23#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_copy {
27 struct nouveau_engine base;
28};
29
30#define nouveau_copy_create(p,e,c,y,i,d) \
31 nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
32#define nouveau_copy_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_copy_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_copy_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_copy_dtor _nouveau_engine_dtor
40#define _nouveau_copy_init _nouveau_engine_init
41#define _nouveau_copy_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nva3_copy_oclass;
44extern struct nouveau_oclass nvc0_copy0_oclass;
45extern struct nouveau_oclass nvc0_copy1_oclass;
46extern struct nouveau_oclass nve0_copy0_oclass;
47extern struct nouveau_oclass nve0_copy1_oclass;
48
49#endif
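
The create macro leans on preprocessor stringification to derive per-instance names. A worked expansion:

/* Worked expansion of nouveau_copy_create(p, e, c, y, 0, d):
 *
 *   "PCE"#i  -> "PCE" "0"  -> "PCE0"
 *   "copy"#i -> "copy" "0" -> "copy0"
 *
 * so copy engine 0 registers as "PCE0"/"copy0" and engine 1 as
 * "PCE1"/"copy1", without needing a separate macro per instance.
 */
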
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
new file mode 100644
index 000000000000..e3674743baaa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
@@ -0,0 +1,46 @@
1#ifndef __NOUVEAU_CRYPT_H__
2#define __NOUVEAU_CRYPT_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_crypt_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_crypt_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_crypt_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_crypt_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
21#define _nouveau_crypt_context_init _nouveau_engctx_init
22#define _nouveau_crypt_context_fini _nouveau_engctx_fini
23#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_crypt {
27 struct nouveau_engine base;
28};
29
30#define nouveau_crypt_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
32#define nouveau_crypt_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_crypt_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_crypt_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_crypt_dtor _nouveau_engine_dtor
40#define _nouveau_crypt_init _nouveau_engine_init
41#define _nouveau_crypt_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv84_crypt_oclass;
44extern struct nouveau_oclass nv98_crypt_oclass;
45
46#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
new file mode 100644
index 000000000000..38ec1252cbaa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -0,0 +1,44 @@
1#ifndef __NOUVEAU_DISP_H__
2#define __NOUVEAU_DISP_H__
3
4#include <core/object.h>
5#include <core/engine.h>
6#include <core/device.h>
7
8struct nouveau_disp {
9 struct nouveau_engine base;
10
11 struct {
12 struct list_head list;
13 spinlock_t lock;
14 void (*notify)(void *, int);
15 void (*get)(void *, int);
16 void (*put)(void *, int);
17 void *data;
18 } vblank;
19};
20
21static inline struct nouveau_disp *
22nouveau_disp(void *obj)
23{
24 return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP];
25}
26
27#define nouveau_disp_create(p,e,c,i,x,d) \
28 nouveau_engine_create((p), (e), (c), true, (i), (x), (d))
29#define nouveau_disp_destroy(d) \
30 nouveau_engine_destroy(&(d)->base)
31#define nouveau_disp_init(d) \
32 nouveau_engine_init(&(d)->base)
33#define nouveau_disp_fini(d,s) \
34 nouveau_engine_fini(&(d)->base, (s))
35
36#define _nouveau_disp_dtor _nouveau_engine_dtor
37#define _nouveau_disp_init _nouveau_engine_init
38#define _nouveau_disp_fini _nouveau_engine_fini
39
40extern struct nouveau_oclass nv04_disp_oclass;
41extern struct nouveau_oclass nv50_disp_oclass;
42extern struct nouveau_oclass nvd0_disp_oclass;
43
44#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
new file mode 100644
index 000000000000..700ccbb1941f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -0,0 +1,57 @@
1#ifndef __NOUVEAU_DMAOBJ_H__
2#define __NOUVEAU_DMAOBJ_H__
3
4#include <core/object.h>
5#include <core/engine.h>
6
7struct nouveau_gpuobj;
8
9struct nouveau_dmaobj {
10 struct nouveau_object base;
11 u32 target;
12 u32 access;
13 u64 start;
14 u64 limit;
15};
16
17#define nouveau_dmaobj_create(p,e,c,a,s,d) \
18 nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
19#define nouveau_dmaobj_destroy(p) \
20 nouveau_object_destroy(&(p)->base)
21#define nouveau_dmaobj_init(p) \
22 nouveau_object_init(&(p)->base)
23#define nouveau_dmaobj_fini(p,s) \
24 nouveau_object_fini(&(p)->base, (s))
25
26int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
27 struct nouveau_oclass *, void *data, u32 size,
28 int length, void **);
29
30#define _nouveau_dmaobj_dtor nouveau_object_destroy
31#define _nouveau_dmaobj_init nouveau_object_init
32#define _nouveau_dmaobj_fini nouveau_object_fini
33
34struct nouveau_dmaeng {
35 struct nouveau_engine base;
36 int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent,
37 struct nouveau_dmaobj *, struct nouveau_gpuobj **);
38};
39
40#define nouveau_dmaeng_create(p,e,c,d) \
41 nouveau_engine_create((p), (e), (c), true, "DMAOBJ", "dmaobj", (d))
42#define nouveau_dmaeng_destroy(p) \
43 nouveau_engine_destroy(&(p)->base)
44#define nouveau_dmaeng_init(p) \
45 nouveau_engine_init(&(p)->base)
46#define nouveau_dmaeng_fini(p,s) \
47 nouveau_engine_fini(&(p)->base, (s))
48
49#define _nouveau_dmaeng_dtor _nouveau_engine_dtor
50#define _nouveau_dmaeng_init _nouveau_engine_init
51#define _nouveau_dmaeng_fini _nouveau_engine_fini
52
53extern struct nouveau_oclass nv04_dmaeng_oclass;
54extern struct nouveau_oclass nv50_dmaeng_oclass;
55extern struct nouveau_oclass nvc0_dmaeng_oclass;
56
57#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
new file mode 100644
index 000000000000..d67fed1e3970
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -0,0 +1,111 @@
1#ifndef __NOUVEAU_FIFO_H__
2#define __NOUVEAU_FIFO_H__
3
4#include <core/namedb.h>
5#include <core/gpuobj.h>
6#include <core/engine.h>
7
8struct nouveau_fifo_chan {
9 struct nouveau_namedb base;
10 struct nouveau_dmaobj *pushdma;
11 struct nouveau_gpuobj *pushgpu;
12 void __iomem *user;
13 u32 size;
14 u16 chid;
15 atomic_t refcnt; /* NV04_NVSW_SET_REF */
16};
17
18static inline struct nouveau_fifo_chan *
19nouveau_fifo_chan(void *obj)
20{
21 return (void *)nv_namedb(obj);
22}
23
24#define nouveau_fifo_channel_create(p,e,c,b,a,s,n,m,d) \
25 nouveau_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n), \
26 (m), sizeof(**d), (void **)d)
27#define nouveau_fifo_channel_init(p) \
28 nouveau_namedb_init(&(p)->base)
29#define nouveau_fifo_channel_fini(p,s) \
30 nouveau_namedb_fini(&(p)->base, (s))
31
32int nouveau_fifo_channel_create_(struct nouveau_object *,
33 struct nouveau_object *,
34 struct nouveau_oclass *,
35 int bar, u32 addr, u32 size, u32 push,
36 u32 engmask, int len, void **);
37void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
38
39#define _nouveau_fifo_channel_init _nouveau_namedb_init
40#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
41
42void _nouveau_fifo_channel_dtor(struct nouveau_object *);
43u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u32);
44void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32);
45
46struct nouveau_fifo_base {
47 struct nouveau_gpuobj base;
48};
49
50#define nouveau_fifo_context_create(p,e,c,g,s,a,f,d) \
51 nouveau_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d))
52#define nouveau_fifo_context_destroy(p) \
53 nouveau_gpuobj_destroy(&(p)->base)
54#define nouveau_fifo_context_init(p) \
55 nouveau_gpuobj_init(&(p)->base)
56#define nouveau_fifo_context_fini(p,s) \
57 nouveau_gpuobj_fini(&(p)->base, (s))
58
59#define _nouveau_fifo_context_dtor _nouveau_gpuobj_dtor
60#define _nouveau_fifo_context_init _nouveau_gpuobj_init
61#define _nouveau_fifo_context_fini _nouveau_gpuobj_fini
62#define _nouveau_fifo_context_rd32 _nouveau_gpuobj_rd32
63#define _nouveau_fifo_context_wr32 _nouveau_gpuobj_wr32
64
65struct nouveau_fifo {
66 struct nouveau_engine base;
67
68 struct nouveau_object **channel;
69 spinlock_t lock;
70 u16 min;
71 u16 max;
72
73 int (*chid)(struct nouveau_fifo *, struct nouveau_object *);
74 void (*pause)(struct nouveau_fifo *, unsigned long *);
75 void (*start)(struct nouveau_fifo *, unsigned long *);
76};
77
78static inline struct nouveau_fifo *
79nouveau_fifo(void *obj)
80{
81 return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_FIFO];
82}
83
84#define nouveau_fifo_create(o,e,c,fc,lc,d) \
85 nouveau_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
86#define nouveau_fifo_init(p) \
87 nouveau_engine_init(&(p)->base)
88#define nouveau_fifo_fini(p,s) \
89 nouveau_engine_fini(&(p)->base, (s))
90
91int nouveau_fifo_create_(struct nouveau_object *, struct nouveau_object *,
92 struct nouveau_oclass *, int min, int max,
93 int size, void **);
94void nouveau_fifo_destroy(struct nouveau_fifo *);
95
96#define _nouveau_fifo_init _nouveau_engine_init
97#define _nouveau_fifo_fini _nouveau_engine_fini
98
99extern struct nouveau_oclass nv04_fifo_oclass;
100extern struct nouveau_oclass nv10_fifo_oclass;
101extern struct nouveau_oclass nv17_fifo_oclass;
102extern struct nouveau_oclass nv40_fifo_oclass;
103extern struct nouveau_oclass nv50_fifo_oclass;
104extern struct nouveau_oclass nv84_fifo_oclass;
105extern struct nouveau_oclass nvc0_fifo_oclass;
106extern struct nouveau_oclass nve0_fifo_oclass;
107
108void nv04_fifo_intr(struct nouveau_subdev *);
109int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
110
111#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
new file mode 100644
index 000000000000..6943b40d0817
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -0,0 +1,72 @@
1#ifndef __NOUVEAU_GRAPH_H__
2#define __NOUVEAU_GRAPH_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6#include <core/enum.h>
7
8struct nouveau_graph_chan {
9 struct nouveau_engctx base;
10};
11
12#define nouveau_graph_context_create(p,e,c,g,s,a,f,d) \
13 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
14#define nouveau_graph_context_destroy(d) \
15 nouveau_engctx_destroy(&(d)->base)
16#define nouveau_graph_context_init(d) \
17 nouveau_engctx_init(&(d)->base)
18#define nouveau_graph_context_fini(d,s) \
19 nouveau_engctx_fini(&(d)->base, (s))
20
21#define _nouveau_graph_context_dtor _nouveau_engctx_dtor
22#define _nouveau_graph_context_init _nouveau_engctx_init
23#define _nouveau_graph_context_fini _nouveau_engctx_fini
24#define _nouveau_graph_context_rd32 _nouveau_engctx_rd32
25#define _nouveau_graph_context_wr32 _nouveau_engctx_wr32
26
27struct nouveau_graph {
28 struct nouveau_engine base;
29};
30
31static inline struct nouveau_graph *
32nouveau_graph(void *obj)
33{
34 return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_GR];
35}
36
37#define nouveau_graph_create(p,e,c,y,d) \
38 nouveau_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
39#define nouveau_graph_destroy(d) \
40 nouveau_engine_destroy(&(d)->base)
41#define nouveau_graph_init(d) \
42 nouveau_engine_init(&(d)->base)
43#define nouveau_graph_fini(d,s) \
44 nouveau_engine_fini(&(d)->base, (s))
45
46#define _nouveau_graph_dtor _nouveau_engine_dtor
47#define _nouveau_graph_init _nouveau_engine_init
48#define _nouveau_graph_fini _nouveau_engine_fini
49
50extern struct nouveau_oclass nv04_graph_oclass;
51extern struct nouveau_oclass nv10_graph_oclass;
52extern struct nouveau_oclass nv20_graph_oclass;
53extern struct nouveau_oclass nv25_graph_oclass;
54extern struct nouveau_oclass nv2a_graph_oclass;
55extern struct nouveau_oclass nv30_graph_oclass;
56extern struct nouveau_oclass nv34_graph_oclass;
57extern struct nouveau_oclass nv35_graph_oclass;
58extern struct nouveau_oclass nv40_graph_oclass;
59extern struct nouveau_oclass nv50_graph_oclass;
60extern struct nouveau_oclass nvc0_graph_oclass;
61extern struct nouveau_oclass nve0_graph_oclass;
62
63extern const struct nouveau_bitfield nv04_graph_nsource[];
64extern struct nouveau_ofuncs nv04_graph_ofuncs;
65bool nv04_graph_idle(void *obj);
66
67extern const struct nouveau_bitfield nv10_graph_intr_name[];
68extern const struct nouveau_bitfield nv10_graph_nstatus[];
69
70extern const struct nouveau_enum nv50_data_error_names[];
71
72#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
new file mode 100644
index 000000000000..bbf0d4a5bbd7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
@@ -0,0 +1,61 @@
1#ifndef __NOUVEAU_MPEG_H__
2#define __NOUVEAU_MPEG_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_mpeg_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_mpeg_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_mpeg_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_mpeg_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_mpeg_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_mpeg_context_dtor _nouveau_engctx_dtor
21#define _nouveau_mpeg_context_init _nouveau_engctx_init
22#define _nouveau_mpeg_context_fini _nouveau_engctx_fini
23#define _nouveau_mpeg_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_mpeg_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_mpeg {
27 struct nouveau_engine base;
28};
29
30#define nouveau_mpeg_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d))
32#define nouveau_mpeg_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_mpeg_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_mpeg_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_mpeg_dtor _nouveau_engine_dtor
40#define _nouveau_mpeg_init _nouveau_engine_init
41#define _nouveau_mpeg_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv31_mpeg_oclass;
44extern struct nouveau_oclass nv40_mpeg_oclass;
45extern struct nouveau_oclass nv50_mpeg_oclass;
46extern struct nouveau_oclass nv84_mpeg_oclass;
47
48extern struct nouveau_oclass nv31_mpeg_sclass[];
49void nv31_mpeg_intr(struct nouveau_subdev *);
50void nv31_mpeg_tile_prog(struct nouveau_engine *, int);
51int nv31_mpeg_init(struct nouveau_object *);
52
53extern struct nouveau_ofuncs nv50_mpeg_ofuncs;
54int nv50_mpeg_context_ctor(struct nouveau_object *, struct nouveau_object *,
55 struct nouveau_oclass *, void *, u32,
56 struct nouveau_object **);
57int nv50_mpeg_tlb_flush(struct nouveau_engine *);
58void nv50_mpeg_intr(struct nouveau_subdev *);
59int nv50_mpeg_init(struct nouveau_object *);
60
61#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
new file mode 100644
index 000000000000..74d554fb3281
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
@@ -0,0 +1,45 @@
1#ifndef __NOUVEAU_PPP_H__
2#define __NOUVEAU_PPP_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_ppp_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_ppp_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_ppp_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_ppp_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
21#define _nouveau_ppp_context_init _nouveau_engctx_init
22#define _nouveau_ppp_context_fini _nouveau_engctx_fini
23#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_ppp {
27 struct nouveau_engine base;
28};
29
30#define nouveau_ppp_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
32#define nouveau_ppp_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_ppp_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_ppp_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_ppp_dtor _nouveau_engine_dtor
40#define _nouveau_ppp_init _nouveau_engine_init
41#define _nouveau_ppp_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv98_ppp_oclass;
44
45#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
new file mode 100644
index 000000000000..c945691c8564
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -0,0 +1,60 @@
1#ifndef __NOUVEAU_SOFTWARE_H__
2#define __NOUVEAU_SOFTWARE_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_software_chan {
8 struct nouveau_engctx base;
9
10 struct {
11 struct list_head head;
12 u32 channel;
13 u32 ctxdma;
14 u64 offset;
15 u32 value;
16 u32 crtc;
17 } vblank;
18
19 int (*flip)(void *);
20 void *flip_data;
21};
22
23#define nouveau_software_context_create(p,e,c,d) \
24 nouveau_engctx_create((p), (e), (c), (p), 0, 0, 0, (d))
25#define nouveau_software_context_destroy(d) \
26 nouveau_engctx_destroy(&(d)->base)
27#define nouveau_software_context_init(d) \
28 nouveau_engctx_init(&(d)->base)
29#define nouveau_software_context_fini(d,s) \
30 nouveau_engctx_fini(&(d)->base, (s))
31
32#define _nouveau_software_context_dtor _nouveau_engctx_dtor
33#define _nouveau_software_context_init _nouveau_engctx_init
34#define _nouveau_software_context_fini _nouveau_engctx_fini
35
36struct nouveau_software {
37 struct nouveau_engine base;
38};
39
40#define nouveau_software_create(p,e,c,d) \
41 nouveau_engine_create((p), (e), (c), true, "SW", "software", (d))
42#define nouveau_software_destroy(d) \
43 nouveau_engine_destroy(&(d)->base)
44#define nouveau_software_init(d) \
45 nouveau_engine_init(&(d)->base)
46#define nouveau_software_fini(d,s) \
47 nouveau_engine_fini(&(d)->base, (s))
48
49#define _nouveau_software_dtor _nouveau_engine_dtor
50#define _nouveau_software_init _nouveau_engine_init
51#define _nouveau_software_fini _nouveau_engine_fini
52
53extern struct nouveau_oclass nv04_software_oclass;
54extern struct nouveau_oclass nv10_software_oclass;
55extern struct nouveau_oclass nv50_software_oclass;
56extern struct nouveau_oclass nvc0_software_oclass;
57
58void nv04_software_intr(struct nouveau_subdev *);
59
60#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
new file mode 100644
index 000000000000..05cd08fba377
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
@@ -0,0 +1,45 @@
1#ifndef __NOUVEAU_VP_H__
2#define __NOUVEAU_VP_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_vp_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_vp_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_vp_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_vp_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_vp_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
21#define _nouveau_vp_context_init _nouveau_engctx_init
22#define _nouveau_vp_context_fini _nouveau_engctx_fini
23#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_vp {
27 struct nouveau_engine base;
28};
29
30#define nouveau_vp_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
32#define nouveau_vp_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_vp_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_vp_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_vp_dtor _nouveau_engine_dtor
40#define _nouveau_vp_init _nouveau_engine_init
41#define _nouveau_vp_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv84_vp_oclass;
44
45#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
new file mode 100644
index 000000000000..4f4ff4502c3d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
@@ -0,0 +1,55 @@
1#ifndef __NOUVEAU_BAR_H__
2#define __NOUVEAU_BAR_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7#include <subdev/fb.h>
8
9struct nouveau_vma;
10
11struct nouveau_bar {
12 struct nouveau_subdev base;
13
14 int (*alloc)(struct nouveau_bar *, struct nouveau_object *,
15 struct nouveau_mem *, struct nouveau_object **);
16 void __iomem *iomem;
17
18 int (*kmap)(struct nouveau_bar *, struct nouveau_mem *,
19 u32 flags, struct nouveau_vma *);
20 int (*umap)(struct nouveau_bar *, struct nouveau_mem *,
21 u32 flags, struct nouveau_vma *);
22 void (*unmap)(struct nouveau_bar *, struct nouveau_vma *);
23 void (*flush)(struct nouveau_bar *);
24};
25
26static inline struct nouveau_bar *
27nouveau_bar(void *obj)
28{
29 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BAR];
30}
31
32#define nouveau_bar_create(p,e,o,d) \
33 nouveau_bar_create_((p), (e), (o), sizeof(**d), (void **)d)
34#define nouveau_bar_init(p) \
35 nouveau_subdev_init(&(p)->base)
36#define nouveau_bar_fini(p,s) \
37 nouveau_subdev_fini(&(p)->base, (s))
38
39int nouveau_bar_create_(struct nouveau_object *, struct nouveau_object *,
40 struct nouveau_oclass *, int, void **);
41void nouveau_bar_destroy(struct nouveau_bar *);
42
43void _nouveau_bar_dtor(struct nouveau_object *);
44#define _nouveau_bar_init _nouveau_subdev_init
45#define _nouveau_bar_fini _nouveau_subdev_fini
46
47extern struct nouveau_oclass nv50_bar_oclass;
48extern struct nouveau_oclass nvc0_bar_oclass;
49
50int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
51 struct nouveau_mem *, struct nouveau_object **);
52
53void nv84_bar_flush(struct nouveau_bar *);
54
55#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
new file mode 100644
index 000000000000..d145b25e6be4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
@@ -0,0 +1,34 @@
1#ifndef __NOUVEAU_BIOS_H__
2#define __NOUVEAU_BIOS_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_bios {
8 struct nouveau_subdev base;
9 u32 size;
10 u8 *data;
11
12 u32 bmp_offset;
13 u32 bit_offset;
14
15 struct {
16 u8 major;
17 u8 chip;
18 u8 minor;
19 u8 micro;
20 } version;
21};
22
23static inline struct nouveau_bios *
24nouveau_bios(void *obj)
25{
26 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VBIOS];
27}
28
29u8 nvbios_checksum(const u8 *data, int size);
30u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
31
32extern struct nouveau_oclass nouveau_bios_oclass;
33
34#endif
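
A minimal sketch of how the bmp_offset/bit_offset fields above could be populated with the two helpers declared in this header. This is not the driver's actual probe path, and the BMP/BIT signature strings are the conventional magic values, assumed here for illustration:

/* Sketch only: locate the BMP and BIT structures in a VBIOS image.
 * The signature strings are assumptions; the real parser may do
 * additional validation before trusting either offset.
 */
static void example_locate_tables(struct nouveau_bios *bios)
{
	bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
					  "\xff\x7fNV\0", 5);
	bios->bit_offset = nvbios_findstr(bios->data, bios->size,
					  "\xff\xb8""BIT", 5);
}
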
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h
new file mode 100644
index 000000000000..73f060b07981
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bit.h
@@ -0,0 +1,13 @@
1#ifndef __NVBIOS_BIT_H__
2#define __NVBIOS_BIT_H__
3
4struct bit_entry {
5 u8 id;
6 u8 version;
7 u16 length;
8 u16 offset;
9};
10
11int bit_entry(struct nouveau_bios *, u8 id, struct bit_entry *);
12
13#endif
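
bit_entry() is the lookup all BIT-based parsers go through. A minimal usage sketch, assuming the usual 0-on-success return convention; the 'P' (performance) table id is used purely as an example:

static u16 example_bit_P_offset(struct nouveau_bios *bios)
{
	struct bit_entry bit_P;

	if (!bit_entry(bios, 'P', &bit_P))	/* 0 == entry found */
		return bit_P.offset;
	return 0x0000;
}
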
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h
new file mode 100644
index 000000000000..10e4dbca649a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/bmp.h
@@ -0,0 +1,39 @@
1#ifndef __NVBIOS_BMP_H__
2#define __NVBIOS_BMP_H__
3
4static inline u16
5bmp_version(struct nouveau_bios *bios)
6{
7 if (bios->bmp_offset) {
8 return nv_ro08(bios, bios->bmp_offset + 5) << 8 |
9 nv_ro08(bios, bios->bmp_offset + 6);
10 }
11
12 return 0x0000;
13}
14
15static inline u16
16bmp_mem_init_table(struct nouveau_bios *bios)
17{
18 if (bmp_version(bios) >= 0x0300)
19 return nv_ro16(bios, bios->bmp_offset + 24);
20 return 0x0000;
21}
22
23static inline u16
24bmp_sdr_seq_table(struct nouveau_bios *bios)
25{
26 if (bmp_version(bios) >= 0x0300)
27 return nv_ro16(bios, bios->bmp_offset + 26);
28 return 0x0000;
29}
30
31static inline u16
32bmp_ddr_seq_table(struct nouveau_bios *bios)
33{
34 if (bmp_version(bios) >= 0x0300)
35 return nv_ro16(bios, bios->bmp_offset + 28);
36 return 0x0000;
37}
38
39#endif
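
bmp_version() packs the major revision in the high byte and the minor in the low byte, which is what the >= 0x0300 checks in the accessors above rely on. A small decoding sketch (the printk is illustrative):

static void example_bmp_info(struct nouveau_bios *bios)
{
	u16 ver = bmp_version(bios);

	if (ver)
		printk(KERN_INFO "BMP version %d.%d\n", ver >> 8, ver & 0xff);
}
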
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
new file mode 100644
index 000000000000..c1270548fd0d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/conn.h
@@ -0,0 +1,27 @@
1#ifndef __NVBIOS_CONN_H__
2#define __NVBIOS_CONN_H__
3
4enum dcb_connector_type {
5 DCB_CONNECTOR_VGA = 0x00,
6 DCB_CONNECTOR_TV_0 = 0x10,
7 DCB_CONNECTOR_TV_1 = 0x11,
8 DCB_CONNECTOR_TV_3 = 0x13,
9 DCB_CONNECTOR_DVI_I = 0x30,
10 DCB_CONNECTOR_DVI_D = 0x31,
11 DCB_CONNECTOR_DMS59_0 = 0x38,
12 DCB_CONNECTOR_DMS59_1 = 0x39,
13 DCB_CONNECTOR_LVDS = 0x40,
14 DCB_CONNECTOR_LVDS_SPWG = 0x41,
15 DCB_CONNECTOR_DP = 0x46,
16 DCB_CONNECTOR_eDP = 0x47,
17 DCB_CONNECTOR_HDMI_0 = 0x60,
18 DCB_CONNECTOR_HDMI_1 = 0x61,
19 DCB_CONNECTOR_DMS59_DP0 = 0x64,
20 DCB_CONNECTOR_DMS59_DP1 = 0x65,
21 DCB_CONNECTOR_NONE = 0xff
22};
23
24u16 dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
25u16 dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len);
26
27#endif
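
The ver/hdr/cnt/len out-parameters follow the usual VBIOS table convention (version, header size, entry count, entry length), so walking the connector table looks roughly like the sketch below. The 0-means-absent return value is an assumption:

static void example_dump_connectors(struct nouveau_bios *bios)
{
	u8 ver, hdr, cnt, len;
	u16 entry;
	u8 i;

	if (!dcb_conntab(bios, &ver, &hdr, &cnt, &len))
		return;		/* no connector table in this VBIOS */

	for (i = 0; i < cnt; i++) {
		entry = dcb_conn(bios, i, &ver, &len);
		if (entry)
			printk(KERN_INFO "connector %d at 0x%04x\n", i, entry);
	}
}
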
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
new file mode 100644
index 000000000000..d682fb625833
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -0,0 +1,90 @@
1#ifndef __NVBIOS_DCB_H__
2#define __NVBIOS_DCB_H__
3
4struct nouveau_bios;
5
6enum dcb_output_type {
7 DCB_OUTPUT_ANALOG = 0x0,
8 DCB_OUTPUT_TV = 0x1,
9 DCB_OUTPUT_TMDS = 0x2,
10 DCB_OUTPUT_LVDS = 0x3,
11 DCB_OUTPUT_DP = 0x6,
12 DCB_OUTPUT_EOL = 0xe,
13 DCB_OUTPUT_UNUSED = 0xf,
14 DCB_OUTPUT_ANY = -1,
15};
16
17struct dcb_output {
	18	int index;	/* may not be the raw DCB index if entries have been merged */
19 enum dcb_output_type type;
20 uint8_t i2c_index;
21 uint8_t heads;
22 uint8_t connector;
23 uint8_t bus;
24 uint8_t location;
25 uint8_t or;
26 bool duallink_possible;
27 union {
28 struct sor_conf {
29 int link;
30 } sorconf;
31 struct {
32 int maxfreq;
33 } crtconf;
34 struct {
35 struct sor_conf sor;
36 bool use_straps_for_mode;
37 bool use_acpi_for_edid;
38 bool use_power_scripts;
39 } lvdsconf;
40 struct {
41 bool has_component_output;
42 } tvconf;
43 struct {
44 struct sor_conf sor;
45 int link_nr;
46 int link_bw;
47 } dpconf;
48 struct {
49 struct sor_conf sor;
50 int slave_addr;
51 } tmdsconf;
52 };
53 bool i2c_upper_default;
54};
55
56u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
57u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
58int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec)
59 (struct nouveau_bios *, void *, int index, u16 entry));
60
61
62/* BIT 'U'/'d' table encoder subtables have hashes matching them to
63 * a particular set of encoders.
64 *
65 * This function returns true if a particular DCB entry matches.
66 */
67static inline bool
68dcb_hash_match(struct dcb_output *dcb, u32 hash)
69{
70 if ((hash & 0x000000f0) != (dcb->location << 4))
71 return false;
72 if ((hash & 0x0000000f) != dcb->type)
73 return false;
74 if (!(hash & (dcb->or << 16)))
75 return false;
76
77 switch (dcb->type) {
78 case DCB_OUTPUT_TMDS:
79 case DCB_OUTPUT_LVDS:
80 case DCB_OUTPUT_DP:
81 if (hash & 0x00c00000) {
82 if (!(hash & (dcb->sorconf.link << 22)))
83 return false;
84 }
85 default:
86 return true;
87 }
88}
89
90#endif
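
A hedged sketch of calling dcb_hash_match(). The hash layout (type in the low nibble, location in the next nibble, an OR mask from bit 16, a link mask from bit 22 with 0x00c00000 flagging its presence) is read off the decoder above; real hashes come from the BIT 'U'/'d' subtables, so the constants here are purely illustrative:

static bool example_match_tmds(struct dcb_output *dcb)
{
	u32 hash = DCB_OUTPUT_TMDS |	/* type, low nibble */
		   (0 << 4) |		/* location: on-chip */
		   (1 << 16) |		/* OR mask: OR 0 */
		   (1 << 22);		/* link mask: link A */

	return dcb_hash_match(dcb, hash);
}
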
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
new file mode 100644
index 000000000000..73b5e5d3e75a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
@@ -0,0 +1,8 @@
1#ifndef __NVBIOS_DP_H__
2#define __NVBIOS_DP_H__
3
4u16 dp_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
5u16 dp_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
6u16 dp_outp_match(struct nouveau_bios *, struct dcb_output *, u8 *ver, u8 *len);
7
8#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h
new file mode 100644
index 000000000000..949fee3af8fb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/extdev.h
@@ -0,0 +1,30 @@
1#ifndef __NVBIOS_EXTDEV_H__
2#define __NVBIOS_EXTDEV_H__
3
4struct nouveau_bios;
5
6enum nvbios_extdev_type {
7 NVBIOS_EXTDEV_LM89 = 0x02,
8 NVBIOS_EXTDEV_VT1103M = 0x40,
9 NVBIOS_EXTDEV_PX3540 = 0x41,
10 NVBIOS_EXTDEV_VT1105M = 0x42, /* or close enough... */
	11	NVBIOS_EXTDEV_ADT7473	= 0x70, /* can also be an LM64 */
12 NVBIOS_EXTDEV_HDCP_EEPROM = 0x90,
13 NVBIOS_EXTDEV_NONE = 0xff,
14};
15
16struct nvbios_extdev_func {
17 u8 type;
18 u8 addr;
19 u8 bus;
20};
21
22int
23nvbios_extdev_parse(struct nouveau_bios *, int, struct nvbios_extdev_func *);
24
25int
26nvbios_extdev_find(struct nouveau_bios *, enum nvbios_extdev_type,
27 struct nvbios_extdev_func *);
28
29
30#endif
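
A short sketch of the lookup path, assuming the usual 0-on-success return convention:

static bool example_has_ext_fan_ctrl(struct nouveau_bios *bios)
{
	struct nvbios_extdev_func func;

	/* ADT7473 (or LM64, per the comment above) fan controller */
	return nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &func) == 0;
}
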
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
new file mode 100644
index 000000000000..2bf178082a36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -0,0 +1,33 @@
1#ifndef __NVBIOS_GPIO_H__
2#define __NVBIOS_GPIO_H__
3
4struct nouveau_bios;
5
6enum dcb_gpio_func_name {
7 DCB_GPIO_PANEL_POWER = 0x01,
8 DCB_GPIO_TVDAC0 = 0x0c,
9 DCB_GPIO_TVDAC1 = 0x2d,
10 DCB_GPIO_PWM_FAN = 0x09,
11 DCB_GPIO_FAN_SENSE = 0x3d,
12 DCB_GPIO_UNUSED = 0xff
13};
14
15struct dcb_gpio_func {
16 u8 func;
17 u8 line;
18 u8 log[2];
19
	20	/* So far, "param" only seems to influence PWM-related GPIOs such
	21	 * as FAN_CONTROL and PANEL_BACKLIGHT_LEVEL: if param equals 1,
	22	 * hardware PWM is available; if param equals 0, the host should
	23	 * toggle the GPIO itself (see the usage sketch after this header).
	24	 */
25 u8 param;
26};
27
28u16 dcb_gpio_table(struct nouveau_bios *);
29u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver);
30int dcb_gpio_parse(struct nouveau_bios *, int idx, u8 func, u8 line,
31 struct dcb_gpio_func *);
32
33#endif
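
As referenced in the comment above, a minimal sketch of acting on "param" for the fan. dcb_gpio_parse()'s 0-on-success return and the 0xff any-line wildcard are assumptions here:

static bool example_fan_has_hw_pwm(struct nouveau_bios *bios)
{
	struct dcb_gpio_func func;

	if (dcb_gpio_parse(bios, 0, DCB_GPIO_PWM_FAN, 0xff, &func))
		return false;		/* no fan GPIO present at all */

	return func.param == 1;		/* 1: hw PWM, 0: host toggles */
}
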
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
new file mode 100644
index 000000000000..5079bedfd985
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
@@ -0,0 +1,25 @@
1#ifndef __NVBIOS_I2C_H__
2#define __NVBIOS_I2C_H__
3
4struct nouveau_bios;
5
6enum dcb_i2c_type {
7 DCB_I2C_NV04_BIT = 0,
8 DCB_I2C_NV4E_BIT = 4,
9 DCB_I2C_NVIO_BIT = 5,
10 DCB_I2C_NVIO_AUX = 6,
11 DCB_I2C_UNUSED = 0xff
12};
13
14struct dcb_i2c_entry {
15 enum dcb_i2c_type type;
16 u8 drive;
17 u8 sense;
18 u32 data;
19};
20
21u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
22u16 dcb_i2c_entry(struct nouveau_bios *, u8 index, u8 *ver, u8 *len);
23int dcb_i2c_parse(struct nouveau_bios *, u8 index, struct dcb_i2c_entry *);
24
25#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
new file mode 100644
index 000000000000..e69a8bdc6e97
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
@@ -0,0 +1,21 @@
1#ifndef __NVBIOS_INIT_H__
2#define __NVBIOS_INIT_H__
3
4struct nvbios_init {
5 struct nouveau_subdev *subdev;
6 struct nouveau_bios *bios;
7 u16 offset;
8 struct dcb_output *outp;
9 int crtc;
10
11 /* internal state used during parsing */
12 u8 execute;
13 u32 nested;
14 u16 repeat;
15 u16 repend;
16};
17
18int nvbios_exec(struct nvbios_init *);
19int nvbios_init(struct nouveau_subdev *, bool execute);
20
21#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h
new file mode 100644
index 000000000000..5572e60414e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/mxm.h
@@ -0,0 +1,9 @@
1#ifndef __NVBIOS_MXM_H__
2#define __NVBIOS_MXM_H__
3
4u16 mxm_table(struct nouveau_bios *, u8 *ver, u8 *hdr);
5
6u8 mxm_sor_map(struct nouveau_bios *, u8 conn);
7u8 mxm_ddc_map(struct nouveau_bios *, u8 port);
8
9#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
new file mode 100644
index 000000000000..0b285e99be5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/perf.h
@@ -0,0 +1,14 @@
1#ifndef __NVBIOS_PERF_H__
2#define __NVBIOS_PERF_H__
3
4struct nouveau_bios;
5
6struct nvbios_perf_fan {
7 u32 pwm_divisor;
8};
9
10int
11nvbios_perf_fan_parse(struct nouveau_bios *, struct nvbios_perf_fan *);
12
13
14#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
new file mode 100644
index 000000000000..c345097592f2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
@@ -0,0 +1,77 @@
1#ifndef __NVBIOS_PLL_H__
2#define __NVBIOS_PLL_H__
3
4/*XXX: kill me */
5struct nouveau_pll_vals {
6 union {
7 struct {
8#ifdef __BIG_ENDIAN
9 uint8_t N1, M1, N2, M2;
10#else
11 uint8_t M1, N1, M2, N2;
12#endif
13 };
14 struct {
15 uint16_t NM1, NM2;
16 } __attribute__((packed));
17 };
18 int log2P;
19
20 int refclk;
21};
22
23struct nouveau_bios;
24
 25/* These match the types in the PLL limits table, version 0x40.
 26 * Nouveau uses them internally, on all chipsets, wherever a
 27 * specific PLL needs to be referenced but the exact register
 28 * isn't known.
 29 */
30enum nvbios_pll_type {
31 PLL_CORE = 0x01,
32 PLL_SHADER = 0x02,
33 PLL_UNK03 = 0x03,
34 PLL_MEMORY = 0x04,
35 PLL_VDEC = 0x05,
36 PLL_UNK40 = 0x40,
37 PLL_UNK41 = 0x41,
38 PLL_UNK42 = 0x42,
39 PLL_VPLL0 = 0x80,
40 PLL_VPLL1 = 0x81,
41 PLL_MAX = 0xff
42};
43
44struct nvbios_pll {
45 enum nvbios_pll_type type;
46 u32 reg;
47 u32 refclk;
48
49 u8 min_p;
50 u8 max_p;
51 u8 bias_p;
52
	53	/*
	54	 * On most pre-NV50 cards, setting a log2P of 7 (the common max_log2p
	55	 * value) is no different from 6 (at least for VPLLs), so allowing the
	56	 * MNP calculation to use 7 puts the generated clock out by a factor
	57	 * of 2.  However, max_log2p cannot be fixed up during parsing, as the
	58	 * unmodified value is still needed when setting MPLLs; hence the
	59	 * additional max_p_usable member below.
	60	 */
61 u8 max_p_usable;
62
63 struct {
64 u32 min_freq;
65 u32 max_freq;
66 u32 min_inputfreq;
67 u32 max_inputfreq;
68 u8 min_m;
69 u8 max_m;
70 u8 min_n;
71 u8 max_n;
72 } vco1, vco2;
73};
74
75int nvbios_pll_parse(struct nouveau_bios *, u32 type, struct nvbios_pll *);
76
77#endif
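
To make the max_p_usable comment concrete, a sketch of the clamping a coefficient calculator would apply when programming a VPLL. The clamping itself is illustrative; the real MNP search lives in the clock subdev:

static int example_clamp_log2p(const struct nvbios_pll *info, int log2P)
{
	if (log2P > info->max_p_usable)
		log2P = info->max_p_usable;
	if (log2P < info->min_p)
		log2P = info->min_p;

	return log2P;
}
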
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
new file mode 100644
index 000000000000..a2c4296fc5f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
@@ -0,0 +1,46 @@
1#ifndef __NVBIOS_THERM_H__
2#define __NVBIOS_THERM_H__
3
4struct nouveau_bios;
5
6struct nvbios_therm_threshold {
7 u8 temp;
8 u8 hysteresis;
9};
10
11struct nvbios_therm_sensor {
12 /* diode */
13 s16 slope_mult;
14 s16 slope_div;
15 s16 offset_num;
16 s16 offset_den;
17 s8 offset_constant;
18
19 /* thresholds */
20 struct nvbios_therm_threshold thrs_fan_boost;
21 struct nvbios_therm_threshold thrs_down_clock;
22 struct nvbios_therm_threshold thrs_critical;
23 struct nvbios_therm_threshold thrs_shutdown;
24};
25
26struct nvbios_therm_fan {
27 u16 pwm_freq;
28
29 u8 min_duty;
30 u8 max_duty;
31};
32
33enum nvbios_therm_domain {
34 NVBIOS_THERM_DOMAIN_CORE,
35 NVBIOS_THERM_DOMAIN_AMBIENT,
36};
37
38int
39nvbios_therm_sensor_parse(struct nouveau_bios *, enum nvbios_therm_domain,
40 struct nvbios_therm_sensor *);
41
42int
43nvbios_therm_fan_parse(struct nouveau_bios *, struct nvbios_therm_fan *);
44
45
46#endif
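
The diode fields describe a linear calibration. A hedged sketch of how the slope/offset pairs are meant to combine, each stored as a rational plus a constant; the exact arithmetic and rounding belong to the therm implementation:

static int example_calibrate_temp(const struct nvbios_therm_sensor *s,
				  int raw)
{
	return raw * s->slope_mult / s->slope_div +
	       s->offset_num / s->offset_den + s->offset_constant;
}
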
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
new file mode 100644
index 000000000000..39e73b91d360
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -0,0 +1,59 @@
1#ifndef __NOUVEAU_CLOCK_H__
2#define __NOUVEAU_CLOCK_H__
3
4#include <core/device.h>
5#include <core/subdev.h>
6
7struct nouveau_pll_vals;
8struct nvbios_pll;
9
10struct nouveau_clock {
11 struct nouveau_subdev base;
12
13 int (*pll_set)(struct nouveau_clock *, u32 type, u32 freq);
14
	15	/*XXX: die, these are here *only* to support the completely
	16	 * insane code that used to live in nouveau_hw.c
	17	 */
18 int (*pll_calc)(struct nouveau_clock *, struct nvbios_pll *,
19 int clk, struct nouveau_pll_vals *pv);
20 int (*pll_prog)(struct nouveau_clock *, u32 reg1,
21 struct nouveau_pll_vals *pv);
22};
23
24static inline struct nouveau_clock *
25nouveau_clock(void *obj)
26{
27 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_CLOCK];
28}
29
30#define nouveau_clock_create(p,e,o,d) \
31 nouveau_subdev_create((p), (e), (o), 0, "CLOCK", "clock", d)
32#define nouveau_clock_destroy(p) \
33 nouveau_subdev_destroy(&(p)->base)
34#define nouveau_clock_init(p) \
35 nouveau_subdev_init(&(p)->base)
36#define nouveau_clock_fini(p,s) \
37 nouveau_subdev_fini(&(p)->base, (s))
38
39int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
40 struct nouveau_oclass *, void *, u32, int, void **);
41
42#define _nouveau_clock_dtor _nouveau_subdev_dtor
43#define _nouveau_clock_init _nouveau_subdev_init
44#define _nouveau_clock_fini _nouveau_subdev_fini
45
46extern struct nouveau_oclass nv04_clock_oclass;
47extern struct nouveau_oclass nv40_clock_oclass;
48extern struct nouveau_oclass nv50_clock_oclass;
49extern struct nouveau_oclass nva3_clock_oclass;
50extern struct nouveau_oclass nvc0_clock_oclass;
51
52int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq);
53int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
54 int clk, struct nouveau_pll_vals *);
55int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
56 struct nouveau_pll_vals *);
57
58
59#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/device.h b/drivers/gpu/drm/nouveau/core/include/subdev/device.h
new file mode 100644
index 000000000000..c9e4c4afa50e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/device.h
@@ -0,0 +1,24 @@
1#ifndef __NOUVEAU_SUBDEV_DEVICE_H__
2#define __NOUVEAU_SUBDEV_DEVICE_H__
3
4#include <core/device.h>
5
6#define nouveau_device_create(p,n,s,c,d,u) \
7 nouveau_device_create_((p), (n), (s), (c), (d), sizeof(**u), (void **)u)
8
9int nouveau_device_create_(struct pci_dev *, u64 name, const char *sname,
10 const char *cfg, const char *dbg, int, void **);
11
12int nv04_identify(struct nouveau_device *);
13int nv10_identify(struct nouveau_device *);
14int nv20_identify(struct nouveau_device *);
15int nv30_identify(struct nouveau_device *);
16int nv40_identify(struct nouveau_device *);
17int nv50_identify(struct nouveau_device *);
18int nvc0_identify(struct nouveau_device *);
19int nve0_identify(struct nouveau_device *);
20
21extern struct nouveau_oclass nouveau_device_sclass[];
22struct nouveau_device *nouveau_device_find(u64 name);
23
24#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
new file mode 100644
index 000000000000..29e4cc1f6cc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
@@ -0,0 +1,40 @@
1#ifndef __NOUVEAU_DEVINIT_H__
2#define __NOUVEAU_DEVINIT_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_devinit {
8 struct nouveau_subdev base;
9 bool post;
10 void (*meminit)(struct nouveau_devinit *);
11};
12
13static inline struct nouveau_devinit *
14nouveau_devinit(void *obj)
15{
16 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_DEVINIT];
17}
18
19#define nouveau_devinit_create(p,e,o,d) \
20 nouveau_devinit_create_((p), (e), (o), sizeof(**d), (void **)d)
21#define nouveau_devinit_destroy(p) \
22 nouveau_subdev_destroy(&(p)->base)
23
24int nouveau_devinit_create_(struct nouveau_object *, struct nouveau_object *,
25 struct nouveau_oclass *, int, void **);
26int nouveau_devinit_init(struct nouveau_devinit *);
27int nouveau_devinit_fini(struct nouveau_devinit *, bool suspend);
28
29extern struct nouveau_oclass nv04_devinit_oclass;
30extern struct nouveau_oclass nv05_devinit_oclass;
31extern struct nouveau_oclass nv10_devinit_oclass;
32extern struct nouveau_oclass nv1a_devinit_oclass;
33extern struct nouveau_oclass nv20_devinit_oclass;
34extern struct nouveau_oclass nv50_devinit_oclass;
35
36void nv04_devinit_dtor(struct nouveau_object *);
37int nv04_devinit_init(struct nouveau_object *);
38int nv04_devinit_fini(struct nouveau_object *, bool);
39
40#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
new file mode 100644
index 000000000000..5c1b5e1904f9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -0,0 +1,134 @@
1#ifndef __NOUVEAU_FB_H__
2#define __NOUVEAU_FB_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6#include <core/mm.h>
7
8#include <subdev/vm.h>
9
 10/* memory type/access flags; these do not match the hardware encodings */
11#define NV_MEM_ACCESS_RO 1
12#define NV_MEM_ACCESS_WO 2
13#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
14#define NV_MEM_ACCESS_SYS 4
15#define NV_MEM_ACCESS_VM 8
16#define NV_MEM_ACCESS_NOSNOOP 16
17
18#define NV_MEM_TARGET_VRAM 0
19#define NV_MEM_TARGET_PCI 1
20#define NV_MEM_TARGET_PCI_NOSNOOP 2
21#define NV_MEM_TARGET_VM 3
22#define NV_MEM_TARGET_GART 4
23
24#define NV_MEM_TYPE_VM 0x7f
25#define NV_MEM_COMP_VM 0x03
26
27struct nouveau_mem {
28 struct drm_device *dev;
29
30 struct nouveau_vma bar_vma;
31 struct nouveau_vma vma[2];
32 u8 page_shift;
33
34 struct nouveau_mm_node *tag;
35 struct list_head regions;
36 dma_addr_t *pages;
37 u32 memtype;
38 u64 offset;
39 u64 size;
40 struct sg_table *sg;
41};
42
43struct nouveau_fb_tile {
44 struct nouveau_mm_node *tag;
45 u32 addr;
46 u32 limit;
47 u32 pitch;
48 u32 zcomp;
49};
50
51struct nouveau_fb {
52 struct nouveau_subdev base;
53
54 bool (*memtype_valid)(struct nouveau_fb *, u32 memtype);
55
56 struct {
57 enum {
58 NV_MEM_TYPE_UNKNOWN = 0,
59 NV_MEM_TYPE_STOLEN,
60 NV_MEM_TYPE_SGRAM,
61 NV_MEM_TYPE_SDRAM,
62 NV_MEM_TYPE_DDR1,
63 NV_MEM_TYPE_DDR2,
64 NV_MEM_TYPE_DDR3,
65 NV_MEM_TYPE_GDDR2,
66 NV_MEM_TYPE_GDDR3,
67 NV_MEM_TYPE_GDDR4,
68 NV_MEM_TYPE_GDDR5
69 } type;
70 u64 stolen;
71 u64 size;
72 int ranks;
73
74 int (*get)(struct nouveau_fb *, u64 size, u32 align,
75 u32 size_nc, u32 type, struct nouveau_mem **);
76 void (*put)(struct nouveau_fb *, struct nouveau_mem **);
77 } ram;
78
79 struct nouveau_mm vram;
80 struct nouveau_mm tags;
81
82 struct {
83 struct nouveau_fb_tile region[16];
84 int regions;
85 void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
86 u32 pitch, u32 flags, struct nouveau_fb_tile *);
87 void (*fini)(struct nouveau_fb *, int i,
88 struct nouveau_fb_tile *);
89 void (*prog)(struct nouveau_fb *, int i,
90 struct nouveau_fb_tile *);
91 } tile;
92};
93
94static inline struct nouveau_fb *
95nouveau_fb(void *obj)
96{
97 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
98}
99
100#define nouveau_fb_create(p,e,c,d) \
101 nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d))
102int nouveau_fb_created(struct nouveau_fb *);
103void nouveau_fb_destroy(struct nouveau_fb *);
104int nouveau_fb_init(struct nouveau_fb *);
105#define nouveau_fb_fini(p,s) \
106 nouveau_subdev_fini(&(p)->base, (s))
107
108void _nouveau_fb_dtor(struct nouveau_object *);
109int _nouveau_fb_init(struct nouveau_object *);
110#define _nouveau_fb_fini _nouveau_subdev_fini
111
112extern struct nouveau_oclass nv04_fb_oclass;
113extern struct nouveau_oclass nv10_fb_oclass;
114extern struct nouveau_oclass nv20_fb_oclass;
115extern struct nouveau_oclass nv30_fb_oclass;
116extern struct nouveau_oclass nv40_fb_oclass;
117extern struct nouveau_oclass nv50_fb_oclass;
118extern struct nouveau_oclass nvc0_fb_oclass;
119
120struct nouveau_bios;
121int nouveau_fb_bios_memtype(struct nouveau_bios *);
122
123bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
124
125void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
126
127void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
128 u32 pitch, u32 flags, struct nouveau_fb_tile *);
129void nv30_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
130
131void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **);
132void nv50_fb_trap(struct nouveau_fb *, int display);
133
134#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
new file mode 100644
index 000000000000..9ea2b12cc15d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -0,0 +1,64 @@
1#ifndef __NOUVEAU_GPIO_H__
2#define __NOUVEAU_GPIO_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7#include <subdev/bios.h>
8#include <subdev/bios/gpio.h>
9
10struct nouveau_gpio {
11 struct nouveau_subdev base;
12
13 /* hardware interfaces */
14 void (*reset)(struct nouveau_gpio *);
15 int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
16 int (*sense)(struct nouveau_gpio *, int line);
17 void (*irq_enable)(struct nouveau_gpio *, int line, bool);
18
19 /* software interfaces */
20 int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
21 struct dcb_gpio_func *);
22 int (*set)(struct nouveau_gpio *, int idx, u8 tag, u8 line, int state);
23 int (*get)(struct nouveau_gpio *, int idx, u8 tag, u8 line);
24 int (*irq)(struct nouveau_gpio *, int idx, u8 tag, u8 line, bool on);
25
26 /* interrupt handling */
27 struct list_head isr;
28 spinlock_t lock;
29
30 void (*isr_run)(struct nouveau_gpio *, int idx, u32 mask);
31 int (*isr_add)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
32 void (*)(void *, int state), void *data);
33 void (*isr_del)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
34 void (*)(void *, int state), void *data);
35};
36
37static inline struct nouveau_gpio *
38nouveau_gpio(void *obj)
39{
40 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_GPIO];
41}
42
43#define nouveau_gpio_create(p,e,o,d) \
44 nouveau_gpio_create_((p), (e), (o), sizeof(**d), (void **)d)
45#define nouveau_gpio_destroy(p) \
46 nouveau_subdev_destroy(&(p)->base)
47#define nouveau_gpio_fini(p,s) \
48 nouveau_subdev_fini(&(p)->base, (s))
49
50int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
51 struct nouveau_oclass *, int, void **);
52int nouveau_gpio_init(struct nouveau_gpio *);
53
54extern struct nouveau_oclass nv10_gpio_oclass;
55extern struct nouveau_oclass nv50_gpio_oclass;
56extern struct nouveau_oclass nvd0_gpio_oclass;
57
58void nv50_gpio_dtor(struct nouveau_object *);
59int nv50_gpio_init(struct nouveau_object *);
60int nv50_gpio_fini(struct nouveau_object *, bool);
61void nv50_gpio_intr(struct nouveau_subdev *);
62void nv50_gpio_irq_enable(struct nouveau_gpio *, int line, bool);
63
64#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
new file mode 100644
index 000000000000..b93ab01e3785
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -0,0 +1,60 @@
1#ifndef __NOUVEAU_I2C_H__
2#define __NOUVEAU_I2C_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7#include <subdev/bios.h>
8#include <subdev/bios/i2c.h>
9
10#define NV_I2C_PORT(n) (0x00 + (n))
11#define NV_I2C_DEFAULT(n) (0x80 + (n))
12
13struct nouveau_i2c_port {
14 struct i2c_adapter adapter;
15 struct nouveau_i2c *i2c;
16 struct i2c_algo_bit_data bit;
17 struct list_head head;
18 u8 index;
19 u8 type;
20 u32 dcb;
21 u32 drive;
22 u32 sense;
23 u32 state;
24};
25
26struct nouveau_i2c {
27 struct nouveau_subdev base;
28
29 struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
30 int (*identify)(struct nouveau_i2c *, int index,
31 const char *what, struct i2c_board_info *,
32 bool (*match)(struct nouveau_i2c_port *,
33 struct i2c_board_info *));
34 struct list_head ports;
35};
36
37static inline struct nouveau_i2c *
38nouveau_i2c(void *obj)
39{
40 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_I2C];
41}
42
43extern struct nouveau_oclass nouveau_i2c_oclass;
44
45void nouveau_i2c_drive_scl(void *, int);
46void nouveau_i2c_drive_sda(void *, int);
47int nouveau_i2c_sense_scl(void *);
48int nouveau_i2c_sense_sda(void *);
49
50int nv_rdi2cr(struct nouveau_i2c_port *, u8 addr, u8 reg);
51int nv_wri2cr(struct nouveau_i2c_port *, u8 addr, u8 reg, u8 val);
52bool nv_probe_i2c(struct nouveau_i2c_port *, u8 addr);
53
54int nv_rdaux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
55int nv_wraux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
56
57extern const struct i2c_algorithm nouveau_i2c_bit_algo;
58extern const struct i2c_algorithm nouveau_i2c_aux_algo;
59
60#endif
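
A small sketch tying the helpers above together: probe the standard 0x50 DDC/EDID address on a port and read its first byte. The error conventions are assumptions:

static int example_read_edid_byte(struct nouveau_i2c_port *port)
{
	if (!nv_probe_i2c(port, 0x50))	/* standard DDC EEPROM address */
		return -ENODEV;

	return nv_rdi2cr(port, 0x50, 0x00);
}
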
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h b/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
new file mode 100644
index 000000000000..88814f159d89
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ibus.h
@@ -0,0 +1,34 @@
1#ifndef __NOUVEAU_IBUS_H__
2#define __NOUVEAU_IBUS_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_ibus {
8 struct nouveau_subdev base;
9};
10
11static inline struct nouveau_ibus *
12nouveau_ibus(void *obj)
13{
14 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_IBUS];
15}
16
17#define nouveau_ibus_create(p,e,o,d) \
18 nouveau_subdev_create_((p), (e), (o), 0, "PIBUS", "ibus", \
19 sizeof(**d), (void **)d)
20#define nouveau_ibus_destroy(p) \
21 nouveau_subdev_destroy(&(p)->base)
22#define nouveau_ibus_init(p) \
23 nouveau_subdev_init(&(p)->base)
24#define nouveau_ibus_fini(p,s) \
25 nouveau_subdev_fini(&(p)->base, (s))
26
27#define _nouveau_ibus_dtor _nouveau_subdev_dtor
28#define _nouveau_ibus_init _nouveau_subdev_init
29#define _nouveau_ibus_fini _nouveau_subdev_fini
30
31extern struct nouveau_oclass nvc0_ibus_oclass;
32extern struct nouveau_oclass nve0_ibus_oclass;
33
34#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
new file mode 100644
index 000000000000..ec7a54e91a08
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -0,0 +1,73 @@
1#ifndef __NOUVEAU_INSTMEM_H__
2#define __NOUVEAU_INSTMEM_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6#include <core/mm.h>
7
8struct nouveau_instobj {
9 struct nouveau_object base;
10 struct list_head head;
11 u32 *suspend;
12 u64 addr;
13 u32 size;
14};
15
16static inline struct nouveau_instobj *
17nv_memobj(void *obj)
18{
19#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
20 if (unlikely(!nv_iclass(obj, NV_MEMOBJ_CLASS)))
21 nv_assert("BAD CAST -> NvMemObj, %08x", nv_hclass(obj));
22#endif
23 return obj;
24}
25
26#define nouveau_instobj_create(p,e,o,d) \
27 nouveau_instobj_create_((p), (e), (o), sizeof(**d), (void **)d)
28#define nouveau_instobj_init(p) \
29 nouveau_object_init(&(p)->base)
30#define nouveau_instobj_fini(p,s) \
31 nouveau_object_fini(&(p)->base, (s))
32
33int nouveau_instobj_create_(struct nouveau_object *, struct nouveau_object *,
34 struct nouveau_oclass *, int, void **);
35void nouveau_instobj_destroy(struct nouveau_instobj *);
36
37void _nouveau_instobj_dtor(struct nouveau_object *);
38#define _nouveau_instobj_init nouveau_object_init
39#define _nouveau_instobj_fini nouveau_object_fini
40
41struct nouveau_instmem {
42 struct nouveau_subdev base;
43 struct list_head list;
44
45 u32 reserved;
46 int (*alloc)(struct nouveau_instmem *, struct nouveau_object *,
47 u32 size, u32 align, struct nouveau_object **);
48};
49
50static inline struct nouveau_instmem *
51nouveau_instmem(void *obj)
52{
53 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_INSTMEM];
54}
55
56#define nouveau_instmem_create(p,e,o,d) \
57 nouveau_instmem_create_((p), (e), (o), sizeof(**d), (void **)d)
58#define nouveau_instmem_destroy(p) \
59 nouveau_subdev_destroy(&(p)->base)
60int nouveau_instmem_create_(struct nouveau_object *, struct nouveau_object *,
61 struct nouveau_oclass *, int, void **);
62int nouveau_instmem_init(struct nouveau_instmem *);
63int nouveau_instmem_fini(struct nouveau_instmem *, bool);
64
65#define _nouveau_instmem_dtor _nouveau_subdev_dtor
66int _nouveau_instmem_init(struct nouveau_object *);
67int _nouveau_instmem_fini(struct nouveau_object *, bool);
68
69extern struct nouveau_oclass nv04_instmem_oclass;
70extern struct nouveau_oclass nv40_instmem_oclass;
71extern struct nouveau_oclass nv50_instmem_oclass;
72
73#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
new file mode 100644
index 000000000000..f351f63bc654
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
@@ -0,0 +1,33 @@
1#ifndef __NOUVEAU_LTCG_H__
2#define __NOUVEAU_LTCG_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_ltcg {
8 struct nouveau_subdev base;
9};
10
11static inline struct nouveau_ltcg *
12nouveau_ltcg(void *obj)
13{
14 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTCG];
15}
16
17#define nouveau_ltcg_create(p,e,o,d) \
18 nouveau_subdev_create_((p), (e), (o), 0, "PLTCG", "level2", \
19 sizeof(**d), (void **)d)
20#define nouveau_ltcg_destroy(p) \
21 nouveau_subdev_destroy(&(p)->base)
22#define nouveau_ltcg_init(p) \
23 nouveau_subdev_init(&(p)->base)
24#define nouveau_ltcg_fini(p,s) \
25 nouveau_subdev_fini(&(p)->base, (s))
26
27#define _nouveau_ltcg_dtor _nouveau_subdev_dtor
28#define _nouveau_ltcg_init _nouveau_subdev_init
29#define _nouveau_ltcg_fini _nouveau_subdev_fini
30
31extern struct nouveau_oclass nvc0_ltcg_oclass;
32
33#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
new file mode 100644
index 000000000000..fded97cea500
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -0,0 +1,49 @@
1#ifndef __NOUVEAU_MC_H__
2#define __NOUVEAU_MC_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_mc_intr {
8 u32 stat;
9 u32 unit;
10};
11
12struct nouveau_mc {
13 struct nouveau_subdev base;
14 const struct nouveau_mc_intr *intr_map;
15};
16
17static inline struct nouveau_mc *
18nouveau_mc(void *obj)
19{
20 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
21}
22
23#define nouveau_mc_create(p,e,o,d) \
24 nouveau_subdev_create_((p), (e), (o), 0, "PMC", "master", \
25 sizeof(**d), (void **)d)
26#define nouveau_mc_destroy(p) \
27 nouveau_subdev_destroy(&(p)->base)
28#define nouveau_mc_init(p) \
29 nouveau_subdev_init(&(p)->base)
30#define nouveau_mc_fini(p,s) \
31 nouveau_subdev_fini(&(p)->base, (s))
32
33#define _nouveau_mc_dtor _nouveau_subdev_dtor
34#define _nouveau_mc_init _nouveau_subdev_init
35#define _nouveau_mc_fini _nouveau_subdev_fini
36
37extern struct nouveau_oclass nv04_mc_oclass;
38extern struct nouveau_oclass nv44_mc_oclass;
39extern struct nouveau_oclass nv50_mc_oclass;
40extern struct nouveau_oclass nv98_mc_oclass;
41extern struct nouveau_oclass nvc0_mc_oclass;
42
43void nouveau_mc_intr(struct nouveau_subdev *);
44
45extern const struct nouveau_mc_intr nv04_mc_intr[];
46int nv04_mc_init(struct nouveau_object *);
47int nv50_mc_init(struct nouveau_object *);
48
49#endif
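
nouveau_mc_intr() walks intr_map to translate pending PMC status bits into subdev interrupts. A sketch of the table's shape, using the well-known NV04 PFIFO/PGRAPH status bits and assuming a zero-terminated array:

static const struct nouveau_mc_intr example_mc_intr[] = {
	{ 0x00000100, NVDEV_ENGINE_FIFO },	/* PFIFO pending */
	{ 0x00001000, NVDEV_ENGINE_GR },	/* PGRAPH pending */
	{}					/* sentinel */
};
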
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h b/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h
new file mode 100644
index 000000000000..b93b152cb566
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mxm.h
@@ -0,0 +1,37 @@
1#ifndef __NOUVEAU_MXM_H__
2#define __NOUVEAU_MXM_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7#define MXM_SANITISE_DCB 0x00000001
8
9struct nouveau_mxm {
10 struct nouveau_subdev base;
11 u32 action;
12 u8 *mxms;
13};
14
15static inline struct nouveau_mxm *
16nouveau_mxm(void *obj)
17{
18 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MXM];
19}
20
21#define nouveau_mxm_create(p,e,o,d) \
22 nouveau_mxm_create_((p), (e), (o), sizeof(**d), (void **)d)
23#define nouveau_mxm_init(p) \
24 nouveau_subdev_init(&(p)->base)
25#define nouveau_mxm_fini(p,s) \
26 nouveau_subdev_fini(&(p)->base, (s))
27int nouveau_mxm_create_(struct nouveau_object *, struct nouveau_object *,
28 struct nouveau_oclass *, int, void **);
29void nouveau_mxm_destroy(struct nouveau_mxm *);
30
31#define _nouveau_mxm_dtor _nouveau_subdev_dtor
32#define _nouveau_mxm_init _nouveau_subdev_init
33#define _nouveau_mxm_fini _nouveau_subdev_fini
34
35extern struct nouveau_oclass nv50_mxm_oclass;
36
37#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
new file mode 100644
index 000000000000..faee569fd458
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -0,0 +1,58 @@
1#ifndef __NOUVEAU_THERM_H__
2#define __NOUVEAU_THERM_H__
3
4#include <core/device.h>
5#include <core/subdev.h>
6
7enum nouveau_therm_fan_mode {
8 FAN_CONTROL_NONE = 0,
9 FAN_CONTROL_MANUAL = 1,
10 FAN_CONTROL_NR,
11};
12
13enum nouveau_therm_attr_type {
14 NOUVEAU_THERM_ATTR_FAN_MIN_DUTY = 0,
15 NOUVEAU_THERM_ATTR_FAN_MAX_DUTY = 1,
16 NOUVEAU_THERM_ATTR_FAN_MODE = 2,
17
18 NOUVEAU_THERM_ATTR_THRS_FAN_BOOST = 10,
19 NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST = 11,
20 NOUVEAU_THERM_ATTR_THRS_DOWN_CLK = 12,
21 NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST = 13,
22 NOUVEAU_THERM_ATTR_THRS_CRITICAL = 14,
23 NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST = 15,
24 NOUVEAU_THERM_ATTR_THRS_SHUTDOWN = 16,
25 NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST = 17,
26};
27
28struct nouveau_therm {
29 struct nouveau_subdev base;
30
31 int (*fan_get)(struct nouveau_therm *);
32 int (*fan_set)(struct nouveau_therm *, int);
33 int (*fan_sense)(struct nouveau_therm *);
34
35 int (*temp_get)(struct nouveau_therm *);
36
37 int (*attr_get)(struct nouveau_therm *, enum nouveau_therm_attr_type);
38 int (*attr_set)(struct nouveau_therm *,
39 enum nouveau_therm_attr_type, int);
40};
41
42static inline struct nouveau_therm *
43nouveau_therm(void *obj)
44{
45 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_THERM];
46}
47
48#define nouveau_therm_create(p,e,o,d) \
49 nouveau_subdev_create((p), (e), (o), 0, "THERM", "therm", d)
50#define nouveau_therm_destroy(p) \
51 nouveau_subdev_destroy(&(p)->base)
52
53#define _nouveau_therm_dtor _nouveau_subdev_dtor
54
55extern struct nouveau_oclass nv40_therm_oclass;
56extern struct nouveau_oclass nv50_therm_oclass;
57
58#endif
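
The hooks above form the whole fan/thermal control surface exposed to subdev clients. A minimal usage sketch (hypothetical caller; `obj` and error handling are assumed, and duty units are whatever fan_get() reports):

	struct nouveau_therm *therm = nouveau_therm(obj);

	if (therm && therm->temp_get(therm) > 90) {
		/* force manual control, then pin the fan at its max duty */
		therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE,
				FAN_CONTROL_MANUAL);
		therm->fan_set(therm, therm->attr_get(therm,
				NOUVEAU_THERM_ATTR_FAN_MAX_DUTY));
	}
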
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
new file mode 100644
index 000000000000..49bff901544c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
@@ -0,0 +1,53 @@
1#ifndef __NOUVEAU_TIMER_H__
2#define __NOUVEAU_TIMER_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_alarm {
8 struct list_head head;
9 u64 timestamp;
10 void (*func)(struct nouveau_alarm *);
11};
12
13bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
14bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
15bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
16void nouveau_timer_alarm(void *, u32 nsec, struct nouveau_alarm *);
17
18#define NV_WAIT_DEFAULT 2000000000ULL
19#define nv_wait(o,a,m,v) \
20 nouveau_timer_wait_eq((o), NV_WAIT_DEFAULT, (a), (m), (v))
21#define nv_wait_ne(o,a,m,v) \
22 nouveau_timer_wait_ne((o), NV_WAIT_DEFAULT, (a), (m), (v))
23#define nv_wait_cb(o,c,d) \
24 nouveau_timer_wait_cb((o), NV_WAIT_DEFAULT, (c), (d))
25
26struct nouveau_timer {
27 struct nouveau_subdev base;
28 u64 (*read)(struct nouveau_timer *);
29 void (*alarm)(struct nouveau_timer *, u32 time, struct nouveau_alarm *);
30};
31
32static inline struct nouveau_timer *
33nouveau_timer(void *obj)
34{
35 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_TIMER];
36}
37
38#define nouveau_timer_create(p,e,o,d) \
39 nouveau_subdev_create_((p), (e), (o), 0, "PTIMER", "timer", \
40 sizeof(**d), (void **)d)
41#define nouveau_timer_destroy(p) \
42 nouveau_subdev_destroy(&(p)->base)
43#define nouveau_timer_init(p) \
44 nouveau_subdev_init(&(p)->base)
45#define nouveau_timer_fini(p,s) \
46 nouveau_subdev_fini(&(p)->base, (s))
47
48int nouveau_timer_create_(struct nouveau_object *, struct nouveau_engine *,
49 struct nouveau_oclass *, int size, void **);
50
51extern struct nouveau_oclass nv04_timer_oclass;
52
53#endif
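
The nv_wait macros reduce to a bounded register poll against the timer subdev's nanosecond clock. A standalone sketch of that pattern (plain C, with clock_gettime standing in for the timer's read hook; read_reg is a hypothetical register accessor, not part of this header):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* poll until (read_reg(addr) & mask) == data, or nsec elapses */
static bool wait_eq(uint32_t (*read_reg)(uint32_t), uint64_t nsec,
		    uint32_t addr, uint32_t mask, uint32_t data)
{
	uint64_t limit = now_ns() + nsec;
	do {
		if ((read_reg(addr) & mask) == data)
			return true;
	} while (now_ns() < limit);
	return false;
}
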
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vga.h b/drivers/gpu/drm/nouveau/core/include/subdev/vga.h
new file mode 100644
index 000000000000..fee09ad818e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vga.h
@@ -0,0 +1,30 @@
1#ifndef __NOUVEAU_VGA_H__
2#define __NOUVEAU_VGA_H__
3
4#include <core/os.h>
5
6/* access to various legacy io ports */
7u8 nv_rdport(void *obj, int head, u16 port);
8void nv_wrport(void *obj, int head, u16 port, u8 value);
9
10/* VGA Sequencer */
11u8 nv_rdvgas(void *obj, int head, u8 index);
12void nv_wrvgas(void *obj, int head, u8 index, u8 value);
13
14/* VGA Graphics */
15u8 nv_rdvgag(void *obj, int head, u8 index);
16void nv_wrvgag(void *obj, int head, u8 index, u8 value);
17
18/* VGA CRTC */
19u8 nv_rdvgac(void *obj, int head, u8 index);
20void nv_wrvgac(void *obj, int head, u8 index, u8 value);
21
22/* VGA indexed port access dispatcher */
23u8 nv_rdvgai(void *obj, int head, u16 port, u8 index);
24void nv_wrvgai(void *obj, int head, u16 port, u8 index, u8 value);
25
26bool nv_lockvgac(void *obj, bool lock);
27u8 nv_rdvgaowner(void *obj);
28void nv_wrvgaowner(void *obj, u8);
29
30#endif
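
The rdvgai/wrvgai pair dispatches the classic VGA indexed-register protocol: write the register index to the index port, then transfer data through the port one above it. A sketch under that assumption (true for e.g. the CRTC pair 0x3d4/0x3d5), built from the primitives declared above:

/* indexed read, assuming data port = index port + 1 */
static u8 rdvgai_sketch(void *obj, int head, u16 port, u8 index)
{
	nv_wrport(obj, head, port, index);
	return nv_rdport(obj, head, port + 1);
}
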
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index 3cdf6001d635..9d595efe667a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -25,10 +25,10 @@
25#ifndef __NOUVEAU_VM_H__ 25#ifndef __NOUVEAU_VM_H__
26#define __NOUVEAU_VM_H__ 26#define __NOUVEAU_VM_H__
27 27
28#include <drm/drmP.h> 28#include <core/object.h>
29 29#include <core/subdev.h>
30#include "nouveau_drv.h" 30#include <core/device.h>
31#include "nouveau_mm.h" 31#include <core/mm.h>
32 32
33struct nouveau_vm_pgt { 33struct nouveau_vm_pgt {
34 struct nouveau_gpuobj *obj[2]; 34 struct nouveau_gpuobj *obj[2];
@@ -40,6 +40,9 @@ struct nouveau_vm_pgd {
40 struct nouveau_gpuobj *obj; 40 struct nouveau_gpuobj *obj;
41}; 41};
42 42
43struct nouveau_gpuobj;
44struct nouveau_mem;
45
43struct nouveau_vma { 46struct nouveau_vma {
44 struct list_head head; 47 struct list_head head;
45 int refcount; 48 int refcount;
@@ -50,21 +53,30 @@ struct nouveau_vma {
50}; 53};
51 54
52struct nouveau_vm { 55struct nouveau_vm {
53 struct drm_device *dev; 56 struct nouveau_vmmgr *vmm;
54 struct nouveau_mm mm; 57 struct nouveau_mm mm;
55 int refcount; 58 int refcount;
56 59
57 struct list_head pgd_list; 60 struct list_head pgd_list;
58 atomic_t engref[16]; 61 atomic_t engref[64]; /* NVDEV_SUBDEV_NR */
59 62
60 struct nouveau_vm_pgt *pgt; 63 struct nouveau_vm_pgt *pgt;
61 u32 fpde; 64 u32 fpde;
62 u32 lpde; 65 u32 lpde;
66};
67
68struct nouveau_vmmgr {
69 struct nouveau_subdev base;
63 70
71 u64 limit;
72 u8 dma_bits;
64 u32 pgt_bits; 73 u32 pgt_bits;
65 u8 spg_shift; 74 u8 spg_shift;
66 u8 lpg_shift; 75 u8 lpg_shift;
67 76
77 int (*create)(struct nouveau_vmmgr *, u64 offset, u64 length,
78 u64 mm_offset, struct nouveau_vm **);
79
68 void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde, 80 void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
69 struct nouveau_gpuobj *pgt[2]); 81 struct nouveau_gpuobj *pgt[2]);
70 void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *, 82 void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
@@ -72,16 +84,47 @@ struct nouveau_vm {
72 u64 phys, u64 delta); 84 u64 phys, u64 delta);
73 void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *, 85 void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
74 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *); 86 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
75
76 void (*map_sg_table)(struct nouveau_vma *, struct nouveau_gpuobj *,
77 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
78 void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt); 87 void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
79 void (*flush)(struct nouveau_vm *); 88 void (*flush)(struct nouveau_vm *);
80}; 89};
81 90
82/* nouveau_vm.c */ 91static inline struct nouveau_vmmgr *
83int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset, 92nouveau_vmmgr(void *obj)
93{
94 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_VM];
95}
96
97#define nouveau_vmmgr_create(p,e,o,i,f,d) \
98 nouveau_subdev_create((p), (e), (o), 0, (i), (f), (d))
99#define nouveau_vmmgr_destroy(p) \
100 nouveau_subdev_destroy(&(p)->base)
101#define nouveau_vmmgr_init(p) \
102 nouveau_subdev_init(&(p)->base)
103#define nouveau_vmmgr_fini(p,s) \
104 nouveau_subdev_fini(&(p)->base, (s))
105
106#define _nouveau_vmmgr_dtor _nouveau_subdev_dtor
107#define _nouveau_vmmgr_init _nouveau_subdev_init
108#define _nouveau_vmmgr_fini _nouveau_subdev_fini
109
110extern struct nouveau_oclass nv04_vmmgr_oclass;
111extern struct nouveau_oclass nv41_vmmgr_oclass;
112extern struct nouveau_oclass nv44_vmmgr_oclass;
113extern struct nouveau_oclass nv50_vmmgr_oclass;
114extern struct nouveau_oclass nvc0_vmmgr_oclass;
115
116int nv04_vm_create(struct nouveau_vmmgr *, u64, u64, u64,
84 struct nouveau_vm **); 117 struct nouveau_vm **);
118void nv04_vmmgr_dtor(struct nouveau_object *);
119
120void nv50_vm_flush_engine(struct nouveau_subdev *, int engine);
121void nvc0_vm_flush_engine(struct nouveau_subdev *, u64 addr, int type);
122
123/* nouveau_vm.c */
124int nouveau_vm_create(struct nouveau_vmmgr *, u64 offset, u64 length,
125 u64 mm_offset, u32 block, struct nouveau_vm **);
126int nouveau_vm_new(struct nouveau_device *, u64 offset, u64 length,
127 u64 mm_offset, struct nouveau_vm **);
85int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **, 128int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
86 struct nouveau_gpuobj *pgd); 129 struct nouveau_gpuobj *pgd);
87int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift, 130int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
@@ -94,26 +137,6 @@ void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
94void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length, 137void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
95 struct nouveau_mem *); 138 struct nouveau_mem *);
96void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length, 139void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
97 struct nouveau_mem *mem); 140 struct nouveau_mem *mem);
98/* nv50_vm.c */
99void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
100 struct nouveau_gpuobj *pgt[2]);
101void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
102 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
103void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
104 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
105void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
106void nv50_vm_flush(struct nouveau_vm *);
107void nv50_vm_flush_engine(struct drm_device *, int engine);
108
109/* nvc0_vm.c */
110void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
111 struct nouveau_gpuobj *pgt[2]);
112void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
113 struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
114void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
115 struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
116void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
117void nvc0_vm_flush(struct nouveau_vm *);
118 141
119#endif 142#endif
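
Given pgt_bits and spg_shift from the vmmgr, a VM offset splits into a page-directory index (biased by fpde, as in struct nouveau_vm above) and a page-table entry index. A sketch of that arithmetic as a hypothetical helper, assuming small pages:

/* decompose a VM offset using the fields above */
static void vm_offset_split(struct nouveau_vm *vm, u64 offset,
			    u32 *pde, u32 *pte)
{
	struct nouveau_vmmgr *vmm = vm->vmm;

	*pde = (offset >> vmm->pgt_bits) - vm->fpde;
	*pte = (offset & ((1ULL << vmm->pgt_bits) - 1)) >> vmm->spg_shift;
}
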
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
new file mode 100644
index 000000000000..cfe3b9cad156
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -0,0 +1,47 @@
1#ifndef __NOUVEAU_OS_H__
2#define __NOUVEAU_OS_H__
3
4#include <linux/types.h>
5#include <linux/slab.h>
6#include <linux/mutex.h>
7#include <linux/pci.h>
8#include <linux/printk.h>
9#include <linux/bitops.h>
10#include <linux/firmware.h>
11#include <linux/module.h>
12#include <linux/i2c.h>
13#include <linux/i2c-algo-bit.h>
14#include <linux/delay.h>
15#include <linux/io-mapping.h>
16#include <linux/vmalloc.h>
17#include <linux/acpi.h>
18#include <linux/dmi.h>
19
20#include <asm/unaligned.h>
21
22static inline int
23ffsll(u64 mask)
24{
25 int i;
26 for (i = 0; i < 64; i++) {
27 if (mask & (1ULL << i))
28 return i + 1;
29 }
30 return 0;
31}
32
33#ifndef ioread32_native
34#ifdef __BIG_ENDIAN
35#define ioread16_native ioread16be
36#define iowrite16_native iowrite16be
37#define ioread32_native ioread32be
38#define iowrite32_native iowrite32be
39#else /* def __BIG_ENDIAN */
40#define ioread16_native ioread16
41#define iowrite16_native iowrite16
42#define ioread32_native ioread32
43#define iowrite32_native iowrite32
44#endif /* def __BIG_ENDIAN else */
45#endif /* !ioread32_native */
46
47#endif
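
ffsll() above follows the libc ffs() convention: it returns the 1-based index of the least-significant set bit, or 0 when no bit is set. For example:

	ffsll(0x10) == 5
	ffsll(0) == 0
	ffsll(1ULL << 63) == 64
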
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
new file mode 100644
index 000000000000..cd01c533007a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <subdev/bar.h>
27
28struct nouveau_barobj {
29 struct nouveau_object base;
30 struct nouveau_vma vma;
31 void __iomem *iomem;
32};
33
34static int
35nouveau_barobj_ctor(struct nouveau_object *parent,
36 struct nouveau_object *engine,
37 struct nouveau_oclass *oclass, void *mem, u32 size,
38 struct nouveau_object **pobject)
39{
40 struct nouveau_bar *bar = (void *)engine;
41 struct nouveau_barobj *barobj;
42 int ret;
43
44 ret = nouveau_object_create(parent, engine, oclass, 0, &barobj);
45 *pobject = nv_object(barobj);
46 if (ret)
47 return ret;
48
49 ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma);
50 if (ret)
51 return ret;
52
53 barobj->iomem = bar->iomem + (u32)barobj->vma.offset;
54 return 0;
55}
56
57static void
58nouveau_barobj_dtor(struct nouveau_object *object)
59{
60 struct nouveau_bar *bar = (void *)object->engine;
61 struct nouveau_barobj *barobj = (void *)object;
62 if (barobj->vma.node)
63 bar->unmap(bar, &barobj->vma);
64 nouveau_object_destroy(&barobj->base);
65}
66
67static u32
68nouveau_barobj_rd32(struct nouveau_object *object, u32 addr)
69{
70 struct nouveau_barobj *barobj = (void *)object;
71 return ioread32_native(barobj->iomem + addr);
72}
73
74static void
75nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
76{
77 struct nouveau_barobj *barobj = (void *)object;
78 iowrite32_native(data, barobj->iomem + addr);
79}
80
81static struct nouveau_oclass
82nouveau_barobj_oclass = {
83 .ofuncs = &(struct nouveau_ofuncs) {
84 .ctor = nouveau_barobj_ctor,
85 .dtor = nouveau_barobj_dtor,
86 .init = nouveau_object_init,
87 .fini = nouveau_object_fini,
88 .rd32 = nouveau_barobj_rd32,
89 .wr32 = nouveau_barobj_wr32,
90 },
91};
92
93int
94nouveau_bar_alloc(struct nouveau_bar *bar, struct nouveau_object *parent,
95 struct nouveau_mem *mem, struct nouveau_object **pobject)
96{
97 struct nouveau_object *engine = nv_object(bar);
98 return nouveau_object_ctor(parent, engine, &nouveau_barobj_oclass,
99 mem, 0, pobject);
100}
101
102int
103nouveau_bar_create_(struct nouveau_object *parent,
104 struct nouveau_object *engine,
105 struct nouveau_oclass *oclass, int length, void **pobject)
106{
107 struct nouveau_device *device = nv_device(parent);
108 struct nouveau_bar *bar;
109 int ret;
110
111 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "BARCTL",
112 "bar", length, pobject);
113 bar = *pobject;
114 if (ret)
115 return ret;
116
117 bar->iomem = ioremap(pci_resource_start(device->pdev, 3),
118 pci_resource_len(device->pdev, 3));
119 return 0;
120}
121
122void
123nouveau_bar_destroy(struct nouveau_bar *bar)
124{
125 if (bar->iomem)
126 iounmap(bar->iomem);
127 nouveau_subdev_destroy(&bar->base);
128}
129
130void
131_nouveau_bar_dtor(struct nouveau_object *object)
132{
133 struct nouveau_bar *bar = (void *)object;
134 nouveau_bar_destroy(bar);
135}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
new file mode 100644
index 000000000000..c3acf5b70d9e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nv50.c
@@ -0,0 +1,263 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26
27#include <subdev/timer.h>
28#include <subdev/bar.h>
29#include <subdev/fb.h>
30#include <subdev/vm.h>
31
32struct nv50_bar_priv {
33 struct nouveau_bar base;
34 spinlock_t lock;
35 struct nouveau_gpuobj *mem;
36 struct nouveau_gpuobj *pad;
37 struct nouveau_gpuobj *pgd;
38 struct nouveau_vm *bar1_vm;
39 struct nouveau_gpuobj *bar1;
40 struct nouveau_vm *bar3_vm;
41 struct nouveau_gpuobj *bar3;
42};
43
44static int
45nv50_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
46 u32 flags, struct nouveau_vma *vma)
47{
48 struct nv50_bar_priv *priv = (void *)bar;
49 int ret;
50
51 ret = nouveau_vm_get(priv->bar3_vm, mem->size << 12, 12, flags, vma);
52 if (ret)
53 return ret;
54
55 nouveau_vm_map(vma, mem);
56 nv50_vm_flush_engine(nv_subdev(bar), 6);
57 return 0;
58}
59
60static int
61nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
62 u32 flags, struct nouveau_vma *vma)
63{
64 struct nv50_bar_priv *priv = (void *)bar;
65 int ret;
66
67 ret = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12, flags, vma);
68 if (ret)
69 return ret;
70
71 nouveau_vm_map(vma, mem);
72 nv50_vm_flush_engine(nv_subdev(bar), 6);
73 return 0;
74}
75
76static void
77nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
78{
79 nouveau_vm_unmap(vma);
80 nv50_vm_flush_engine(nv_subdev(bar), 6);
81 nouveau_vm_put(vma);
82}
83
84static void
85nv50_bar_flush(struct nouveau_bar *bar)
86{
87 struct nv50_bar_priv *priv = (void *)bar;
88 unsigned long flags;
89 spin_lock_irqsave(&priv->lock, flags);
90 nv_wr32(priv, 0x00330c, 0x00000001);
91 if (!nv_wait(priv, 0x00330c, 0x00000002, 0x00000000))
92 nv_warn(priv, "flush timeout\n");
93 spin_unlock_irqrestore(&priv->lock, flags);
94}
95
96void
97nv84_bar_flush(struct nouveau_bar *bar)
98{
99 struct nv50_bar_priv *priv = (void *)bar;
100 unsigned long flags;
101 spin_lock_irqsave(&priv->lock, flags);
102 nv_wr32(bar, 0x070000, 0x00000001);
103 if (!nv_wait(priv, 0x070000, 0x00000002, 0x00000000))
104 nv_warn(priv, "flush timeout\n");
105 spin_unlock_irqrestore(&priv->lock, flags);
106}
107
108static int
109nv50_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
110 struct nouveau_oclass *oclass, void *data, u32 size,
111 struct nouveau_object **pobject)
112{
113 struct nouveau_device *device = nv_device(parent);
114 struct nouveau_object *heap;
115 struct nouveau_vm *vm;
116 struct nv50_bar_priv *priv;
117 u64 start, limit;
118 int ret;
119
120 ret = nouveau_bar_create(parent, engine, oclass, &priv);
121 *pobject = nv_object(priv);
122 if (ret)
123 return ret;
124
125 ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0, NVOBJ_FLAG_HEAP,
126 &priv->mem);
127 heap = nv_object(priv->mem);
128 if (ret)
129 return ret;
130
131 ret = nouveau_gpuobj_new(parent, heap, (device->chipset == 0x50) ?
132 0x1400 : 0x0200, 0, 0, &priv->pad);
133 if (ret)
134 return ret;
135
136 ret = nouveau_gpuobj_new(parent, heap, 0x4000, 0, 0, &priv->pgd);
137 if (ret)
138 return ret;
139
140 /* BAR3 */
141 start = 0x0100000000ULL;
142 limit = start + pci_resource_len(device->pdev, 3);
143
144 ret = nouveau_vm_new(device, start, limit, start, &vm);
145 if (ret)
146 return ret;
147
148 ret = nouveau_gpuobj_new(parent, heap, ((limit-- - start) >> 12) * 8,
149 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
150 &vm->pgt[0].obj[0]);
151 vm->pgt[0].refcount[0] = 1;
152 if (ret)
153 return ret;
154
155 ret = nouveau_vm_ref(vm, &priv->bar3_vm, priv->pgd);
156 nouveau_vm_ref(NULL, &vm, NULL);
157 if (ret)
158 return ret;
159
160 ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar3);
161 if (ret)
162 return ret;
163
164 nv_wo32(priv->bar3, 0x00, 0x7fc00000);
165 nv_wo32(priv->bar3, 0x04, lower_32_bits(limit));
166 nv_wo32(priv->bar3, 0x08, lower_32_bits(start));
167 nv_wo32(priv->bar3, 0x0c, upper_32_bits(limit) << 24 |
168 upper_32_bits(start));
169 nv_wo32(priv->bar3, 0x10, 0x00000000);
170 nv_wo32(priv->bar3, 0x14, 0x00000000);
171
172 /* BAR1 */
173 start = 0x0000000000ULL;
174 limit = start + pci_resource_len(device->pdev, 1);
175
176 ret = nouveau_vm_new(device, start, limit--, start, &vm);
177 if (ret)
178 return ret;
179
180 ret = nouveau_vm_ref(vm, &priv->bar1_vm, priv->pgd);
181 nouveau_vm_ref(NULL, &vm, NULL);
182 if (ret)
183 return ret;
184
185 ret = nouveau_gpuobj_new(parent, heap, 24, 16, 0, &priv->bar1);
186 if (ret)
187 return ret;
188
189 nv_wo32(priv->bar1, 0x00, 0x7fc00000);
190 nv_wo32(priv->bar1, 0x04, lower_32_bits(limit));
191 nv_wo32(priv->bar1, 0x08, lower_32_bits(start));
192 nv_wo32(priv->bar1, 0x0c, upper_32_bits(limit) << 24 |
193 upper_32_bits(start));
194 nv_wo32(priv->bar1, 0x10, 0x00000000);
195 nv_wo32(priv->bar1, 0x14, 0x00000000);
196
197 priv->base.alloc = nouveau_bar_alloc;
198 priv->base.kmap = nv50_bar_kmap;
199 priv->base.umap = nv50_bar_umap;
200 priv->base.unmap = nv50_bar_unmap;
201 if (device->chipset == 0x50)
202 priv->base.flush = nv50_bar_flush;
203 else
204 priv->base.flush = nv84_bar_flush;
205 spin_lock_init(&priv->lock);
206 return 0;
207}
208
209static void
210nv50_bar_dtor(struct nouveau_object *object)
211{
212 struct nv50_bar_priv *priv = (void *)object;
213 nouveau_gpuobj_ref(NULL, &priv->bar1);
214 nouveau_vm_ref(NULL, &priv->bar1_vm, priv->pgd);
215 nouveau_gpuobj_ref(NULL, &priv->bar3);
216 if (priv->bar3_vm) {
217 nouveau_gpuobj_ref(NULL, &priv->bar3_vm->pgt[0].obj[0]);
218 nouveau_vm_ref(NULL, &priv->bar3_vm, priv->pgd);
219 }
220 nouveau_gpuobj_ref(NULL, &priv->pgd);
221 nouveau_gpuobj_ref(NULL, &priv->pad);
222 nouveau_gpuobj_ref(NULL, &priv->mem);
223 nouveau_bar_destroy(&priv->base);
224}
225
226static int
227nv50_bar_init(struct nouveau_object *object)
228{
229 struct nv50_bar_priv *priv = (void *)object;
230 int ret;
231
232 ret = nouveau_bar_init(&priv->base);
233 if (ret)
234 return ret;
235
236 nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
237 nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
238 nv50_vm_flush_engine(nv_subdev(priv), 6);
239
240 nv_wr32(priv, 0x001704, 0x00000000 | priv->mem->addr >> 12);
241 nv_wr32(priv, 0x001704, 0x40000000 | priv->mem->addr >> 12);
242 nv_wr32(priv, 0x001708, 0x80000000 | priv->bar1->node->offset >> 4);
243 nv_wr32(priv, 0x00170c, 0x80000000 | priv->bar3->node->offset >> 4);
244 return 0;
245}
246
247static int
248nv50_bar_fini(struct nouveau_object *object, bool suspend)
249{
250 struct nv50_bar_priv *priv = (void *)object;
251 return nouveau_bar_fini(&priv->base, suspend);
252}
253
254struct nouveau_oclass
255nv50_bar_oclass = {
256 .handle = NV_SUBDEV(BAR, 0x50),
257 .ofuncs = &(struct nouveau_ofuncs) {
258 .ctor = nv50_bar_ctor,
259 .dtor = nv50_bar_dtor,
260 .init = nv50_bar_init,
261 .fini = nv50_bar_fini,
262 },
263};
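
The six words written to priv->bar1 and priv->bar3 above follow the nv50 DMA-object layout: a class/flags word, the 40-bit limit and base as their low 32 bits, then the two high bytes packed into a single word. The packing, isolated into a hypothetical helper that mirrors the ctor:

/* encode an nv50-style DMA object covering [start, limit] */
static void pack_dma_object(u32 w[6], u64 start, u64 limit)
{
	w[0] = 0x7fc00000;		/* flags, as in nv50_bar_ctor */
	w[1] = lower_32_bits(limit);
	w[2] = lower_32_bits(start);
	w[3] = upper_32_bits(limit) << 24 | upper_32_bits(start);
	w[4] = 0x00000000;
	w[5] = 0x00000000;
}
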
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
new file mode 100644
index 000000000000..77a6fb725d3f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -0,0 +1,215 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26
27#include <subdev/timer.h>
28#include <subdev/bar.h>
29#include <subdev/fb.h>
30#include <subdev/vm.h>
31
32struct nvc0_bar_priv {
33 struct nouveau_bar base;
34 spinlock_t lock;
35 struct {
36 struct nouveau_gpuobj *mem;
37 struct nouveau_gpuobj *pgd;
38 struct nouveau_vm *vm;
39 } bar[2];
40};
41
42static int
43nvc0_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem,
44 u32 flags, struct nouveau_vma *vma)
45{
46 struct nvc0_bar_priv *priv = (void *)bar;
47 int ret;
48
49 ret = nouveau_vm_get(priv->bar[0].vm, mem->size << 12, 12, flags, vma);
50 if (ret)
51 return ret;
52
53 nouveau_vm_map(vma, mem);
54 nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[0].pgd->addr, 5);
55 return 0;
56}
57
58static int
59nvc0_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem,
60 u32 flags, struct nouveau_vma *vma)
61{
62 struct nvc0_bar_priv *priv = (void *)bar;
63 int ret;
64
65 ret = nouveau_vm_get(priv->bar[1].vm, mem->size << 12,
66 mem->page_shift, flags, vma);
67 if (ret)
68 return ret;
69
70 nouveau_vm_map(vma, mem);
71 nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[1].pgd->addr, 5);
72 return 0;
73}
74
75static void
76nvc0_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
77{
78 struct nvc0_bar_priv *priv = (void *)bar;
79 int i = !(vma->vm == priv->bar[0].vm);
80
81 nouveau_vm_unmap(vma);
82 nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[i].pgd->addr, 5);
83 nouveau_vm_put(vma);
84}
85
86static int
87nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
88 struct nouveau_oclass *oclass, void *data, u32 size,
89 struct nouveau_object **pobject)
90{
91 struct nouveau_device *device = nv_device(parent);
92 struct pci_dev *pdev = device->pdev;
93 struct nvc0_bar_priv *priv;
94 struct nouveau_gpuobj *mem;
95 struct nouveau_vm *vm;
96 int ret;
97
98 ret = nouveau_bar_create(parent, engine, oclass, &priv);
99 *pobject = nv_object(priv);
100 if (ret)
101 return ret;
102
103 /* BAR3 */
104 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[0].mem);
105 mem = priv->bar[0].mem;
106 if (ret)
107 return ret;
108
109 ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[0].pgd);
110 if (ret)
111 return ret;
112
113 ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 3), 0, &vm);
114 if (ret)
115 return ret;
116
117 ret = nouveau_gpuobj_new(parent, NULL,
118 (pci_resource_len(pdev, 3) >> 12) * 8,
119 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
120 &vm->pgt[0].obj[0]);
121 vm->pgt[0].refcount[0] = 1;
122 if (ret)
123 return ret;
124
125 ret = nouveau_vm_ref(vm, &priv->bar[0].vm, priv->bar[0].pgd);
126 nouveau_vm_ref(NULL, &vm, NULL);
127 if (ret)
128 return ret;
129
130 nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[0].pgd->addr));
131 nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[0].pgd->addr));
132 nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 3) - 1));
133 nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 3) - 1));
134
135 /* BAR1 */
136 ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0, 0, &priv->bar[1].mem);
137 mem = priv->bar[1].mem;
138 if (ret)
139 return ret;
140
141 ret = nouveau_gpuobj_new(parent, NULL, 0x8000, 0, 0, &priv->bar[1].pgd);
142 if (ret)
143 return ret;
144
145 ret = nouveau_vm_new(device, 0, pci_resource_len(pdev, 1), 0, &vm);
146 if (ret)
147 return ret;
148
149 ret = nouveau_vm_ref(vm, &priv->bar[1].vm, priv->bar[1].pgd);
150 nouveau_vm_ref(NULL, &vm, NULL);
151 if (ret)
152 return ret;
153
154 nv_wo32(mem, 0x0200, lower_32_bits(priv->bar[1].pgd->addr));
155 nv_wo32(mem, 0x0204, upper_32_bits(priv->bar[1].pgd->addr));
156 nv_wo32(mem, 0x0208, lower_32_bits(pci_resource_len(pdev, 1) - 1));
157 nv_wo32(mem, 0x020c, upper_32_bits(pci_resource_len(pdev, 1) - 1));
158
159 priv->base.alloc = nouveau_bar_alloc;
160 priv->base.kmap = nvc0_bar_kmap;
161 priv->base.umap = nvc0_bar_umap;
162 priv->base.unmap = nvc0_bar_unmap;
163 priv->base.flush = nv84_bar_flush;
164 spin_lock_init(&priv->lock);
165 return 0;
166}
167
168static void
169nvc0_bar_dtor(struct nouveau_object *object)
170{
171 struct nvc0_bar_priv *priv = (void *)object;
172
173 nouveau_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd);
174 nouveau_gpuobj_ref(NULL, &priv->bar[1].pgd);
175 nouveau_gpuobj_ref(NULL, &priv->bar[1].mem);
176
177 if (priv->bar[0].vm) {
178 nouveau_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]);
179 nouveau_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd);
180 }
181 nouveau_gpuobj_ref(NULL, &priv->bar[0].pgd);
182 nouveau_gpuobj_ref(NULL, &priv->bar[0].mem);
183
184 nouveau_bar_destroy(&priv->base);
185}
186
187static int
188nvc0_bar_init(struct nouveau_object *object)
189{
190 struct nvc0_bar_priv *priv = (void *)object;
191 int ret;
192
193 ret = nouveau_bar_init(&priv->base);
194 if (ret)
195 return ret;
196
197 nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
198 nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
199 nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);
200
201 nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
202 nv_wr32(priv, 0x001714, 0xc0000000 | priv->bar[0].mem->addr >> 12);
203 return 0;
204}
205
206struct nouveau_oclass
207nvc0_bar_oclass = {
208 .handle = NV_SUBDEV(BAR, 0xc0),
209 .ofuncs = &(struct nouveau_ofuncs) {
210 .ctor = nvc0_bar_ctor,
211 .dtor = nvc0_bar_dtor,
212 .init = nvc0_bar_init,
213 .fini = _nouveau_bar_fini,
214 },
215};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
new file mode 100644
index 000000000000..2fbb6df697cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -0,0 +1,479 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/device.h>
27#include <core/subdev.h>
28#include <core/option.h>
29
30#include <subdev/bios.h>
31#include <subdev/bios/bmp.h>
32#include <subdev/bios/bit.h>
33
34u8
35nvbios_checksum(const u8 *data, int size)
36{
37 u8 sum = 0;
38 while (size--)
39 sum += *data++;
40 return sum;
41}
42
43u16
44nvbios_findstr(const u8 *data, int size, const char *str, int len)
45{
46 int i, j;
47
48 for (i = 0; i <= (size - len); i++) {
49 for (j = 0; j < len; j++)
50 if ((char)data[i + j] != str[j])
51 break;
52 if (j == len)
53 return i;
54 }
55
56 return 0;
57}
58
59#if defined(__powerpc__)
60static void
61nouveau_bios_shadow_of(struct nouveau_bios *bios)
62{
63 struct pci_dev *pdev = nv_device(bios)->pdev;
64 struct device_node *dn;
65 const u32 *data;
66 int size, i;
67
68 dn = pci_device_to_OF_node(pdev);
69 if (!dn) {
70 nv_info(bios, "Unable to get the OF node\n");
71 return;
72 }
73
74 data = of_get_property(dn, "NVDA,BMP", &size);
75 if (data) {
76 bios->size = size;
77 bios->data = kmalloc(bios->size, GFP_KERNEL);
78 if (bios->data)
79 memcpy(bios->data, data, size);
80 }
81}
82#endif
83
84static void
85nouveau_bios_shadow_pramin(struct nouveau_bios *bios)
86{
87 struct nouveau_device *device = nv_device(bios);
88 u32 bar0 = 0;
89 int i;
90
91 if (device->card_type >= NV_50) {
92 u64 addr = (u64)(nv_rd32(bios, 0x619f04) & 0xffffff00) << 8;
93 if (!addr) {
94 addr = (u64)nv_rd32(bios, 0x001700) << 16;
95 addr += 0xf0000;
96 }
97
98 bar0 = nv_mask(bios, 0x001700, 0xffffffff, addr >> 16);
99 }
100
101 /* bail if no rom signature */
102 if (nv_rd08(bios, 0x700000) != 0x55 ||
103 nv_rd08(bios, 0x700001) != 0xaa)
104 goto out;
105
106 bios->size = nv_rd08(bios, 0x700002) * 512;
107 bios->data = kmalloc(bios->size, GFP_KERNEL);
108 if (bios->data) {
109 for (i = 0; i < bios->size; i++)
110 nv_wo08(bios, i, nv_rd08(bios, 0x700000 + i));
111 }
112
113out:
114 if (device->card_type >= NV_50)
115 nv_wr32(bios, 0x001700, bar0);
116}
117
118static void
119nouveau_bios_shadow_prom(struct nouveau_bios *bios)
120{
121 struct nouveau_device *device = nv_device(bios);
122 u32 pcireg, access;
123 u16 pcir;
124 int i;
125
126 /* enable access to rom */
127 if (device->card_type >= NV_50)
128 pcireg = 0x088050;
129 else
130 pcireg = 0x001850;
131 access = nv_mask(bios, pcireg, 0x00000001, 0x00000000);
132
 133 /* bail if no rom signature; work around a PROM reading issue on
 134 * some chipsets, where the first read after a period of inactivity
 135 * returns the wrong result, by retrying the first header byte a
 136 * few times before giving up
 137 */
138 i = 16;
139 do {
140 if (nv_rd08(bios, 0x300000) == 0x55)
141 break;
142 } while (i--);
143
144 if (!i || nv_rd08(bios, 0x300001) != 0xaa)
145 goto out;
146
 147 /* additional check - read PCI record header */
148 pcir = nv_rd08(bios, 0x300018) |
149 nv_rd08(bios, 0x300019) << 8;
150 if (nv_rd08(bios, 0x300000 + pcir) != 'P' ||
151 nv_rd08(bios, 0x300001 + pcir) != 'C' ||
152 nv_rd08(bios, 0x300002 + pcir) != 'I' ||
153 nv_rd08(bios, 0x300003 + pcir) != 'R')
154 goto out;
155
156 /* read entire bios image to system memory */
157 bios->size = nv_rd08(bios, 0x300002) * 512;
158 bios->data = kmalloc(bios->size, GFP_KERNEL);
159 if (bios->data) {
160 for (i = 0; i < bios->size; i++)
161 nv_wo08(bios, i, nv_rd08(bios, 0x300000 + i));
162 }
163
164out:
165 /* disable access to rom */
166 nv_wr32(bios, pcireg, access);
167}
168
169#if defined(CONFIG_ACPI)
170int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
171bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
172#else
173static inline bool
174nouveau_acpi_rom_supported(struct pci_dev *pdev) {
175 return false;
176}
177
178static inline int
179nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) {
180 return -EINVAL;
181}
182#endif
183
184static void
185nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
186{
187 struct pci_dev *pdev = nv_device(bios)->pdev;
188 int cnt = 65536 / 4096;
189 int ret;
190
191 if (!nouveau_acpi_rom_supported(pdev))
192 return;
193
194 bios->data = kmalloc(65536, GFP_KERNEL);
195 bios->size = 0;
196 if (!bios->data)
197 return;
198
199 while (cnt--) {
200 ret = nouveau_acpi_get_bios_chunk(bios->data, bios->size, 4096);
201 if (ret != 4096)
202 return;
203
204 bios->size += 4096;
205 }
206}
207
208static void
209nouveau_bios_shadow_pci(struct nouveau_bios *bios)
210{
211 struct pci_dev *pdev = nv_device(bios)->pdev;
212 size_t size;
213
214 if (!pci_enable_rom(pdev)) {
215 void __iomem *rom = pci_map_rom(pdev, &size);
216 if (rom && size) {
217 bios->data = kmalloc(size, GFP_KERNEL);
218 if (bios->data) {
219 memcpy_fromio(bios->data, rom, size);
220 bios->size = size;
221 }
222 }
223 if (rom)
224 pci_unmap_rom(pdev, rom);
225
226 pci_disable_rom(pdev);
227 }
228}
229
230static int
231nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
232{
233 if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) {
234 nv_info(bios, "... signature not found\n");
235 return 0;
236 }
237
238 if (nvbios_checksum(bios->data, bios->data[2] * 512)) {
239 nv_info(bios, "... checksum invalid\n");
240 /* if a ro image is somewhat bad, it's probably all rubbish */
241 return writeable ? 2 : 1;
242 }
243
244 nv_info(bios, "... appears to be valid\n");
245 return 3;
246}
247
248struct methods {
249 const char desc[16];
250 void (*shadow)(struct nouveau_bios *);
251 const bool rw;
252 int score;
253 u32 size;
254 u8 *data;
255};
256
257static int
258nouveau_bios_shadow(struct nouveau_bios *bios)
259{
260 struct methods shadow_methods[] = {
261#if defined(__powerpc__)
262 { "OpenFirmware", nouveau_bios_shadow_of, true, 0, 0, NULL },
263#endif
264 { "PRAMIN", nouveau_bios_shadow_pramin, true, 0, 0, NULL },
265 { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL },
266 { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL },
267 { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL },
268 {}
269 };
270 struct methods *mthd, *best;
271 const struct firmware *fw;
272 const char *optarg;
273 int optlen, ret;
274 char *source;
275
276 optarg = nouveau_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
277 source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
278 if (source) {
279 /* try to match one of the built-in methods */
280 mthd = shadow_methods;
281 do {
282 if (strcasecmp(source, mthd->desc))
283 continue;
284 nv_info(bios, "source: %s\n", mthd->desc);
285
286 mthd->shadow(bios);
287 mthd->score = nouveau_bios_score(bios, mthd->rw);
288 if (mthd->score) {
289 kfree(source);
290 return 0;
291 }
292 } while ((++mthd)->shadow);
293
294 /* attempt to load firmware image */
295 ret = request_firmware(&fw, source, &nv_device(bios)->pdev->dev);
296 if (ret == 0) {
297 bios->size = fw->size;
298 bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
299 release_firmware(fw);
300
301 nv_info(bios, "image: %s\n", source);
302 if (nouveau_bios_score(bios, 1)) {
303 kfree(source);
304 return 0;
305 }
306
307 kfree(bios->data);
308 bios->data = NULL;
309 }
310
 311 nv_error(bios, "source '%s' invalid\n", source);
312 kfree(source);
313 }
314
315 mthd = shadow_methods;
316 do {
317 nv_info(bios, "checking %s for image...\n", mthd->desc);
318 mthd->shadow(bios);
319 mthd->score = nouveau_bios_score(bios, mthd->rw);
320 mthd->size = bios->size;
321 mthd->data = bios->data;
322 bios->data = NULL;
323 } while (mthd->score != 3 && (++mthd)->shadow);
324
325 mthd = shadow_methods;
326 best = mthd;
327 do {
328 if (mthd->score > best->score) {
329 kfree(best->data);
330 best = mthd;
331 }
332 } while ((++mthd)->shadow);
333
334 if (best->score) {
335 nv_info(bios, "using image from %s\n", best->desc);
336 bios->size = best->size;
337 bios->data = best->data;
338 return 0;
339 }
340
341 nv_error(bios, "unable to locate usable image\n");
342 return -EINVAL;
343}
344
345static u8
346nouveau_bios_rd08(struct nouveau_object *object, u32 addr)
347{
348 struct nouveau_bios *bios = (void *)object;
349 return bios->data[addr];
350}
351
352static u16
353nouveau_bios_rd16(struct nouveau_object *object, u32 addr)
354{
355 struct nouveau_bios *bios = (void *)object;
356 return get_unaligned_le16(&bios->data[addr]);
357}
358
359static u32
360nouveau_bios_rd32(struct nouveau_object *object, u32 addr)
361{
362 struct nouveau_bios *bios = (void *)object;
363 return get_unaligned_le32(&bios->data[addr]);
364}
365
366static void
367nouveau_bios_wr08(struct nouveau_object *object, u32 addr, u8 data)
368{
369 struct nouveau_bios *bios = (void *)object;
370 bios->data[addr] = data;
371}
372
373static void
374nouveau_bios_wr16(struct nouveau_object *object, u32 addr, u16 data)
375{
376 struct nouveau_bios *bios = (void *)object;
377 put_unaligned_le16(data, &bios->data[addr]);
378}
379
380static void
381nouveau_bios_wr32(struct nouveau_object *object, u32 addr, u32 data)
382{
383 struct nouveau_bios *bios = (void *)object;
384 put_unaligned_le32(data, &bios->data[addr]);
385}
386
387static int
388nouveau_bios_ctor(struct nouveau_object *parent,
389 struct nouveau_object *engine,
390 struct nouveau_oclass *oclass, void *data, u32 size,
391 struct nouveau_object **pobject)
392{
393 struct nouveau_bios *bios;
394 struct bit_entry bit_i;
395 int ret;
396
397 ret = nouveau_subdev_create(parent, engine, oclass, 0,
398 "VBIOS", "bios", &bios);
399 *pobject = nv_object(bios);
400 if (ret)
401 return ret;
402
403 ret = nouveau_bios_shadow(bios);
404 if (ret)
405 return ret;
406
407 /* detect type of vbios we're dealing with */
408 bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
409 "\xff\x7f""NV\0", 5);
410 if (bios->bmp_offset) {
411 nv_info(bios, "BMP version %x.%x\n",
412 bmp_version(bios) >> 8,
413 bmp_version(bios) & 0xff);
414 }
415
416 bios->bit_offset = nvbios_findstr(bios->data, bios->size,
417 "\xff\xb8""BIT", 5);
418 if (bios->bit_offset)
419 nv_info(bios, "BIT signature found\n");
420
421 /* determine the vbios version number */
422 if (!bit_entry(bios, 'i', &bit_i) && bit_i.length >= 4) {
423 bios->version.major = nv_ro08(bios, bit_i.offset + 3);
424 bios->version.chip = nv_ro08(bios, bit_i.offset + 2);
425 bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
426 bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
427 } else
428 if (bmp_version(bios)) {
429 bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
430 bios->version.chip = nv_ro08(bios, bios->bmp_offset + 12);
431 bios->version.minor = nv_ro08(bios, bios->bmp_offset + 11);
432 bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
433 }
434
435 nv_info(bios, "version %02x.%02x.%02x.%02x\n",
436 bios->version.major, bios->version.chip,
437 bios->version.minor, bios->version.micro);
438
439 return 0;
440}
441
442static void
443nouveau_bios_dtor(struct nouveau_object *object)
444{
445 struct nouveau_bios *bios = (void *)object;
446 kfree(bios->data);
447 nouveau_subdev_destroy(&bios->base);
448}
449
450static int
451nouveau_bios_init(struct nouveau_object *object)
452{
453 struct nouveau_bios *bios = (void *)object;
454 return nouveau_subdev_init(&bios->base);
455}
456
457static int
458nouveau_bios_fini(struct nouveau_object *object, bool suspend)
459{
460 struct nouveau_bios *bios = (void *)object;
461 return nouveau_subdev_fini(&bios->base, suspend);
462}
463
464struct nouveau_oclass
465nouveau_bios_oclass = {
466 .handle = NV_SUBDEV(VBIOS, 0x00),
467 .ofuncs = &(struct nouveau_ofuncs) {
468 .ctor = nouveau_bios_ctor,
469 .dtor = nouveau_bios_dtor,
470 .init = nouveau_bios_init,
471 .fini = nouveau_bios_fini,
472 .rd08 = nouveau_bios_rd08,
473 .rd16 = nouveau_bios_rd16,
474 .rd32 = nouveau_bios_rd32,
475 .wr08 = nouveau_bios_wr08,
476 .wr16 = nouveau_bios_wr16,
477 .wr32 = nouveau_bios_wr32,
478 },
479};
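
nouveau_bios_score() encodes a simple validity policy: the image must begin with the 0x55,0xaa ROM signature, its length is data[2] * 512 bytes, and the byte sum over that length must be zero mod 256. The same test as a standalone sketch:

#include <stddef.h>
#include <stdint.h>

/* returns non-zero if a shadowed image looks like a valid vbios */
static int vbios_image_valid(const uint8_t *data, size_t avail)
{
	size_t size, i;
	uint8_t sum = 0;

	if (avail < 3 || data[0] != 0x55 || data[1] != 0xaa)
		return 0;		/* no ROM signature */
	size = (size_t)data[2] * 512;
	if (size > avail)
		return 0;
	for (i = 0; i < size; i++)	/* nvbios_checksum() equivalent */
		sum += data[i];
	return sum == 0;
}
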
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c b/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c
new file mode 100644
index 000000000000..1d03a3f2b2d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/bit.c
@@ -0,0 +1,52 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "core/object.h"
26
27#include "subdev/bios.h"
28#include "subdev/bios/bit.h"
29
30int
31bit_entry(struct nouveau_bios *bios, u8 id, struct bit_entry *bit)
32{
33 if (likely(bios->bit_offset)) {
34 u8 entries = nv_ro08(bios, bios->bit_offset + 10);
35 u32 entry = bios->bit_offset + 12;
36 while (entries--) {
37 if (nv_ro08(bios, entry + 0) == id) {
38 bit->id = nv_ro08(bios, entry + 0);
39 bit->version = nv_ro08(bios, entry + 1);
40 bit->length = nv_ro16(bios, entry + 2);
41 bit->offset = nv_ro16(bios, entry + 4);
42 return 0;
43 }
44
45 entry += nv_ro08(bios, bios->bit_offset + 9);
46 }
47
48 return -ENOENT;
49 }
50
51 return -EINVAL;
52}
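
bit_entry()'s walk is easy to mirror on a raw shadow buffer: the entry count sits at bit_offset + 10, the per-entry stride at bit_offset + 9, and entries start at bit_offset + 12, each holding an id byte, a version byte, then little-endian length and offset. A standalone sketch under those assumptions:

#include <stdint.h>

struct bit_entry_raw {
	uint8_t  id, version;
	uint16_t length, offset;
};

static uint16_t rd16le(const uint8_t *p)
{
	return p[0] | p[1] << 8;
}

/* locate BIT entry 'id' in a shadowed image; -1 if absent */
static int bit_entry_find(const uint8_t *bios, uint16_t bit_offset,
			  uint8_t id, struct bit_entry_raw *bit)
{
	uint8_t entries = bios[bit_offset + 10];
	uint8_t stride = bios[bit_offset + 9];
	uint32_t entry = bit_offset + 12;

	while (entries--) {
		if (bios[entry + 0] == id) {
			bit->id = bios[entry + 0];
			bit->version = bios[entry + 1];
			bit->length = rd16le(&bios[entry + 2]);
			bit->offset = rd16le(&bios[entry + 4]);
			return 0;
		}
		entry += stride;
	}
	return -1;
}
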
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c b/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
new file mode 100644
index 000000000000..5ac010efd959
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/device.h>
26
27#include <subdev/bios.h>
28#include <subdev/bios/dcb.h>
29#include <subdev/bios/conn.h>
30
31u16
32dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
33{
34 u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
35 if (dcb && *ver >= 0x30 && *hdr >= 0x16) {
36 u16 data = nv_ro16(bios, dcb + 0x14);
37 if (data) {
38 *ver = nv_ro08(bios, data + 0);
39 *hdr = nv_ro08(bios, data + 1);
40 *cnt = nv_ro08(bios, data + 2);
41 *len = nv_ro08(bios, data + 3);
42 return data;
43 }
44 }
45 return 0x0000;
46}
47
48u16
49dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
50{
51 u8 hdr, cnt;
52 u16 data = dcb_conntab(bios, ver, &hdr, &cnt, len);
53 if (data && idx < cnt)
54 return data + hdr + (idx * *len);
55 return 0x0000;
56}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
new file mode 100644
index 000000000000..9ed6e728a94c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "core/device.h"
26
27#include "subdev/bios.h"
28#include "subdev/bios/dcb.h"
29
30u16
31dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
32{
33 struct nouveau_device *device = nv_device(bios);
34 u16 dcb = 0x0000;
35
36 if (device->card_type > NV_04)
37 dcb = nv_ro16(bios, 0x36);
38 if (!dcb) {
39 nv_warn(bios, "DCB table not found\n");
40 return dcb;
41 }
42
43 *ver = nv_ro08(bios, dcb);
44
45 if (*ver >= 0x41) {
 46 nv_warn(bios, "DCB version 0x%02x unknown\n", *ver);
47 return 0x0000;
48 } else
49 if (*ver >= 0x30) {
50 if (nv_ro32(bios, dcb + 6) == 0x4edcbdcb) {
51 *hdr = nv_ro08(bios, dcb + 1);
52 *cnt = nv_ro08(bios, dcb + 2);
53 *len = nv_ro08(bios, dcb + 3);
54 return dcb;
55 }
56 } else
57 if (*ver >= 0x20) {
58 if (nv_ro32(bios, dcb + 4) == 0x4edcbdcb) {
59 u16 i2c = nv_ro16(bios, dcb + 2);
60 *hdr = 8;
61 *cnt = (i2c - dcb) / 8;
62 *len = 8;
63 return dcb;
64 }
65 } else
66 if (*ver >= 0x15) {
67 if (!nv_strncmp(bios, dcb - 7, 7, "DEV_REC")) {
68 u16 i2c = nv_ro16(bios, dcb + 2);
69 *hdr = 4;
70 *cnt = (i2c - dcb) / 10;
71 *len = 10;
72 return dcb;
73 }
74 } else {
75 /*
 76 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
 77 * always has the same single (crt) entry, even when tv-out
 78 * is present, so the conclusion is this version cannot
 79 * really be used.
 80 *
 81 * v1.2 tables (some NV6/10, and NV15+) normally have the
 82 * same 5 entries, which are not specific to the card and so
 83 * of no use.
84 *
85 * v1.2 does have an I2C table that read_dcb_i2c_table can
86 * handle, but cards exist (nv11 in #14821) with a bad i2c
87 * table pointer, so use the indices parsed in
88 * parse_bmp_structure.
89 *
90 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
91 */
92 nv_warn(bios, "DCB contains no useful data\n");
93 return 0x0000;
94 }
95
96 nv_warn(bios, "DCB header validation failed\n");
97 return 0x0000;
98}
99
100u16
101dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
102{
103 u8 hdr, cnt;
104 u16 dcb = dcb_table(bios, ver, &hdr, &cnt, len);
105 if (dcb && idx < cnt)
106 return dcb + hdr + (idx * *len);
107 return 0x0000;
108}
109
110int
111dcb_outp_foreach(struct nouveau_bios *bios, void *data,
112 int (*exec)(struct nouveau_bios *, void *, int, u16))
113{
114 int ret, idx = -1;
115 u8 ver, len;
116 u16 outp;
117
118 while ((outp = dcb_outp(bios, ++idx, &ver, &len))) {
119 if (nv_ro32(bios, outp) == 0x00000000)
120 break; /* seen on an NV11 with DCB v1.5 */
121 if (nv_ro32(bios, outp) == 0xffffffff)
122 break; /* seen on an NV17 with DCB v2.0 */
123
124 if (nv_ro08(bios, outp) == DCB_OUTPUT_UNUSED)
125 continue;
126 if (nv_ro08(bios, outp) == DCB_OUTPUT_EOL)
127 break;
128
129 ret = exec(bios, data, idx, outp);
130 if (ret)
131 return ret;
132 }
133
134 return 0;
135}
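
dcb_outp_foreach() turns the table walk into a callback iteration; a caller supplies an exec hook plus an opaque context pointer. A minimal usage sketch with a hypothetical callback that counts usable outputs:

static int
count_outp(struct nouveau_bios *bios, void *data, int idx, u16 outp)
{
	(*(int *)data)++;
	return 0;	/* a non-zero return would abort the walk */
}

	/* ... at the call site ... */
	int count = 0;
	dcb_outp_foreach(bios, &count, count_outp);
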
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
new file mode 100644
index 000000000000..3cbc0f3e8d5e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -0,0 +1,76 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25
26#include "subdev/bios.h"
27#include "subdev/bios/bit.h"
28#include "subdev/bios/dcb.h"
29#include "subdev/bios/dp.h"
30
31u16
32dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
33{
34 struct bit_entry bit_d;
35
36 if (!bit_entry(bios, 'd', &bit_d)) {
37 if (bit_d.version == 1) {
38 u16 data = nv_ro16(bios, bit_d.offset);
39 if (data) {
40 *ver = nv_ro08(bios, data + 0);
41 *hdr = nv_ro08(bios, data + 1);
42 *len = nv_ro08(bios, data + 2);
43 *cnt = nv_ro08(bios, data + 3);
44 return data;
45 }
46 }
47 }
48
49 return 0x0000;
50}
51
52u16
53dp_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
54{
55 u8 hdr, cnt;
56 u16 table = dp_table(bios, ver, &hdr, &cnt, len);
57 if (table && idx < cnt)
58 return nv_ro16(bios, table + hdr + (idx * *len));
59 return 0xffff;
60}
61
62u16
63dp_outp_match(struct nouveau_bios *bios, struct dcb_output *outp,
64 u8 *ver, u8 *len)
65{
66 u8 idx = 0;
67 u16 data;
68 while ((data = dp_outp(bios, idx++, ver, len)) != 0xffff) {
69 if (data) {
70 u32 hash = nv_ro32(bios, data);
71 if (dcb_hash_match(outp, hash))
72 return data;
73 }
74 }
75 return 0x0000;
76}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
new file mode 100644
index 000000000000..5afb568b2d69
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
@@ -0,0 +1,100 @@
1/*
2 * Copyright 2012 Nouveau Community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/dcb.h>
27#include <subdev/bios/extdev.h>
28
29static u16
30extdev_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
31{
32 u8 dcb_ver, dcb_hdr, dcb_cnt, dcb_len;
33 u16 dcb, extdev = 0;
34
35 dcb = dcb_table(bios, &dcb_ver, &dcb_hdr, &dcb_cnt, &dcb_len);
36 if (!dcb || (dcb_ver != 0x30 && dcb_ver != 0x40))
37 return 0x0000;
38
39 extdev = nv_ro16(bios, dcb + 18);
40 if (!extdev)
41 return 0x0000;
42
43 *ver = nv_ro08(bios, extdev + 0);
44 *hdr = nv_ro08(bios, extdev + 1);
45 *cnt = nv_ro08(bios, extdev + 2);
46 *len = nv_ro08(bios, extdev + 3);
47
48 return extdev + *hdr;
49}
50
51u16
52nvbios_extdev_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
53{
54 u8 hdr, cnt;
55 u16 extdev = extdev_table(bios, ver, &hdr, len, &cnt);
56 if (extdev && idx < cnt)
57 return extdev + idx * *len;
58 return 0x0000;
59}
60
61static void
62extdev_parse_entry(struct nouveau_bios *bios, u16 offset,
63 struct nvbios_extdev_func *entry)
64{
65 entry->type = nv_ro08(bios, offset + 0);
66 entry->addr = nv_ro08(bios, offset + 1);
67 entry->bus = (nv_ro08(bios, offset + 2) >> 4) & 1;
68}
69
70int
71nvbios_extdev_parse(struct nouveau_bios *bios, int idx,
72 struct nvbios_extdev_func *func)
73{
74 u8 ver, len;
75 u16 entry;
76
77 if (!(entry = nvbios_extdev_entry(bios, idx, &ver, &len)))
78 return -EINVAL;
79
80 extdev_parse_entry(bios, entry, func);
81
82 return 0;
83}
84
85int
86nvbios_extdev_find(struct nouveau_bios *bios, enum nvbios_extdev_type type,
87 struct nvbios_extdev_func *func)
88{
89 u8 ver, len, i;
90 u16 entry;
91
92 i = 0;
93	while ((entry = nvbios_extdev_entry(bios, i++, &ver, &len))) {
94 extdev_parse_entry(bios, entry, func);
95 if (func->type == type)
96 return 0;
97 }
98
99 return -EINVAL;
100}
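
/* A minimal usage sketch, assuming the NVBIOS_EXTDEV_ADT7473 enumerant
 * from subdev/bios/extdev.h: locate an external fan controller and
 * report where the board strapped it. Illustrative only.
 */
static int __maybe_unused
extdev_find_fan(struct nouveau_bios *bios)
{
	struct nvbios_extdev_func func;

	if (nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &func))
		return -ENODEV;
	nv_info(bios, "fan controller at addr 0x%02x, bus %d\n",
		func.addr, func.bus);
	return 0;
}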
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
new file mode 100644
index 000000000000..4c9f1e508165
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -0,0 +1,121 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/dcb.h>
27#include <subdev/bios/gpio.h>
28
29u16
30dcb_gpio_table(struct nouveau_bios *bios)
31{
32 u8 ver, hdr, cnt, len;
33 u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
34 if (dcb) {
35 if (ver >= 0x30 && hdr >= 0x0c)
36 return nv_ro16(bios, dcb + 0x0a);
37 if (ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
38 return nv_ro16(bios, dcb - 0x0f);
39 }
40 return 0x0000;
41}
42
43u16
44dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver)
45{
46 u16 gpio = dcb_gpio_table(bios);
47 if (gpio) {
48 *ver = nv_ro08(bios, gpio);
49 if (*ver < 0x30 && ent < nv_ro08(bios, gpio + 2))
50 return gpio + 3 + (ent * nv_ro08(bios, gpio + 1));
51 else if (ent < nv_ro08(bios, gpio + 2))
52 return gpio + nv_ro08(bios, gpio + 1) +
53 (ent * nv_ro08(bios, gpio + 3));
54 }
55 return 0x0000;
56}
57
58int
59dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
60 struct dcb_gpio_func *gpio)
61{
62 u8 ver, hdr, cnt, len;
63 u16 entry;
64 int i = -1;
65
66 while ((entry = dcb_gpio_entry(bios, idx, ++i, &ver))) {
67 if (ver < 0x40) {
68 u16 data = nv_ro16(bios, entry);
69 *gpio = (struct dcb_gpio_func) {
70 .line = (data & 0x001f) >> 0,
71 .func = (data & 0x07e0) >> 5,
72 .log[0] = (data & 0x1800) >> 11,
73 .log[1] = (data & 0x6000) >> 13,
74 .param = !!(data & 0x8000),
75 };
76 } else
77 if (ver < 0x41) {
78 u32 data = nv_ro32(bios, entry);
79 *gpio = (struct dcb_gpio_func) {
80 .line = (data & 0x0000001f) >> 0,
81 .func = (data & 0x0000ff00) >> 8,
82 .log[0] = (data & 0x18000000) >> 27,
83 .log[1] = (data & 0x60000000) >> 29,
84 .param = !!(data & 0x80000000),
85 };
86 } else {
87 u32 data = nv_ro32(bios, entry + 0);
88			u8 data1 = nv_ro08(bios, entry + 4);
89 *gpio = (struct dcb_gpio_func) {
90 .line = (data & 0x0000003f) >> 0,
91 .func = (data & 0x0000ff00) >> 8,
92 .log[0] = (data1 & 0x30) >> 4,
93 .log[1] = (data1 & 0xc0) >> 6,
94 .param = !!(data & 0x80000000),
95 };
96 }
97
98 if ((line == 0xff || line == gpio->line) &&
99 (func == 0xff || func == gpio->func))
100 return 0;
101 }
102
103 /* DCB 2.2, fixed TVDAC GPIO data */
104 if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x22) {
105 if (func == DCB_GPIO_TVDAC0) {
106 u8 conf = nv_ro08(bios, entry - 5);
107 u8 addr = nv_ro08(bios, entry - 4);
108 if (conf & 0x01) {
109 *gpio = (struct dcb_gpio_func) {
110 .func = DCB_GPIO_TVDAC0,
111 .line = addr >> 4,
112 .log[0] = !!(conf & 0x02),
113 .log[1] = !(conf & 0x02),
114 };
115 return 0;
116 }
117 }
118 }
119
120 return -EINVAL;
121}
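
/* A minimal usage sketch: look up the TVDAC0 detect function on any
 * line (0xff is the wildcard handled above) and report the line and
 * logic levels the VBIOS assigned. Illustrative only.
 */
static void __maybe_unused
gpio_show_tvdac(struct nouveau_bios *bios)
{
	struct dcb_gpio_func func;

	if (!dcb_gpio_parse(bios, 0, DCB_GPIO_TVDAC0, 0xff, &func))
		nv_info(bios, "tvdac0: line %d log 0x%x/0x%x\n",
			func.line, func.log[0], func.log[1]);
}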
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
new file mode 100644
index 000000000000..ad577db83766
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
@@ -0,0 +1,129 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25
26#include "subdev/bios.h"
27#include "subdev/bios/dcb.h"
28#include "subdev/bios/i2c.h"
29
30u16
31dcb_i2c_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
32{
33 u16 i2c = 0x0000;
34 u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
35 if (dcb) {
36 if (*ver >= 0x15)
37 i2c = nv_ro16(bios, dcb + 2);
38 if (*ver >= 0x30)
39 i2c = nv_ro16(bios, dcb + 4);
40 }
41
42 if (i2c && *ver >= 0x30) {
43 *ver = nv_ro08(bios, i2c + 0);
44 *hdr = nv_ro08(bios, i2c + 1);
45 *cnt = nv_ro08(bios, i2c + 2);
46 *len = nv_ro08(bios, i2c + 3);
47 } else {
48		/* *ver is already the DCB version; use it as-is */
49 *hdr = 0;
50 *cnt = 16;
51 *len = 4;
52 }
53
54 return i2c;
55}
56
57u16
58dcb_i2c_entry(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
59{
60 u8 hdr, cnt;
61 u16 i2c = dcb_i2c_table(bios, ver, &hdr, &cnt, len);
62 if (i2c && idx < cnt)
63 return i2c + hdr + (idx * *len);
64 return 0x0000;
65}
66
67int
68dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
69{
70 u8 ver, len;
71 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
72 if (ent) {
73 info->data = nv_ro32(bios, ent + 0);
74 info->type = nv_ro08(bios, ent + 3);
75 if (ver < 0x30) {
76 info->type &= 0x07;
77 if (info->type == 0x07)
78 info->type = 0xff;
79 }
80
81 switch (info->type) {
82 case DCB_I2C_NV04_BIT:
83 info->drive = nv_ro08(bios, ent + 0);
84 info->sense = nv_ro08(bios, ent + 1);
85 return 0;
86 case DCB_I2C_NV4E_BIT:
87 info->drive = nv_ro08(bios, ent + 1);
88 return 0;
89 case DCB_I2C_NVIO_BIT:
90 case DCB_I2C_NVIO_AUX:
91 info->drive = nv_ro08(bios, ent + 0);
92 return 0;
93 case DCB_I2C_UNUSED:
94 return 0;
95 default:
96 nv_warn(bios, "unknown i2c type %d\n", info->type);
97 info->type = DCB_I2C_UNUSED;
98 return 0;
99 }
100 }
101
102 if (bios->bmp_offset && idx < 2) {
103 /* BMP (from v4.0 has i2c info in the structure, it's in a
104 * fixed location on earlier VBIOS
105 */
106 if (nv_ro08(bios, bios->bmp_offset + 5) < 4)
107 ent = 0x0048;
108 else
109 ent = 0x0036 + bios->bmp_offset;
110
111 if (idx == 0) {
112 info->drive = nv_ro08(bios, ent + 4);
113 if (!info->drive) info->drive = 0x3f;
114 info->sense = nv_ro08(bios, ent + 5);
115 if (!info->sense) info->sense = 0x3e;
116 } else
117 if (idx == 1) {
118 info->drive = nv_ro08(bios, ent + 6);
119 if (!info->drive) info->drive = 0x37;
120 info->sense = nv_ro08(bios, ent + 7);
121 if (!info->sense) info->sense = 0x36;
122 }
123
124 info->type = DCB_I2C_NV04_BIT;
125 return 0;
126 }
127
128 return -ENOENT;
129}
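
/* A minimal usage sketch: decode the first i2c port and report the
 * bus type and drive register the VBIOS describes. Illustrative only.
 */
static void __maybe_unused
i2c_show_port0(struct nouveau_bios *bios)
{
	struct dcb_i2c_entry info;

	if (!dcb_i2c_parse(bios, 0, &info) && info.type != DCB_I2C_UNUSED)
		nv_info(bios, "i2c port 0: type %d drive 0x%02x\n",
			info.type, info.drive);
}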
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
new file mode 100644
index 000000000000..6be8c32f6e4c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -0,0 +1,2120 @@
1#include <core/engine.h>
2#include <core/device.h>
3
4#include <subdev/bios.h>
5#include <subdev/bios/conn.h>
6#include <subdev/bios/bmp.h>
7#include <subdev/bios/bit.h>
8#include <subdev/bios/dcb.h>
9#include <subdev/bios/dp.h>
10#include <subdev/bios/init.h>
11#include <subdev/devinit.h>
12#include <subdev/clock.h>
13#include <subdev/i2c.h>
14#include <subdev/vga.h>
15#include <subdev/gpio.h>
16
17#define bioslog(lvl, fmt, args...) do { \
18 nv_printk(init->bios, lvl, "0x%04x[%c]: "fmt, init->offset, \
19 init_exec(init) ? '0' + (init->nested - 1) : ' ', ##args); \
20} while(0)
21#define cont(fmt, args...) do { \
22 if (nv_subdev(init->bios)->debug >= NV_DBG_TRACE) \
23 printk(fmt, ##args); \
24} while(0)
25#define trace(fmt, args...) bioslog(TRACE, fmt, ##args)
26#define warn(fmt, args...) bioslog(WARN, fmt, ##args)
27#define error(fmt, args...) bioslog(ERROR, fmt, ##args)
28
29/******************************************************************************
30 * init parser control flow helpers
31 *****************************************************************************/
32
33static inline bool
34init_exec(struct nvbios_init *init)
35{
36 return (init->execute == 1) || ((init->execute & 5) == 5);
37}
38
39static inline void
40init_exec_set(struct nvbios_init *init, bool exec)
41{
42 if (exec) init->execute &= 0xfd;
43 else init->execute |= 0x02;
44}
45
46static inline void
47init_exec_inv(struct nvbios_init *init)
48{
49 init->execute ^= 0x02;
50}
51
52static inline void
53init_exec_force(struct nvbios_init *init, bool exec)
54{
55 if (exec) init->execute |= 0x04;
56 else init->execute &= 0xfb;
57}
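
/* The execute byte is a small bitfield rather than a plain boolean; a
 * sketch of the semantics as inferred from the helpers above:
 *
 *   bit 0: the script is actually being executed (not just traced)
 *   bit 1: a failed condition opcode has paused execution
 *   bit 2: execution is being forced (INIT_RESET, INIT_COMPUTE_MEM, ..)
 *
 * init_exec() is thus true for 0x01 (running, no failed condition) and
 * for any value with bits 0 and 2 set, e.g. 0x07, where forcing
 * overrides a pending condition; 0x03 (condition failed, not forced)
 * is false.
 */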
58
59/******************************************************************************
60 * init parser wrappers for normal register/i2c/whatever accessors
61 *****************************************************************************/
62
63static inline int
64init_or(struct nvbios_init *init)
65{
66 if (init->outp)
67 return ffs(init->outp->or) - 1;
68 error("script needs OR!!\n");
69 return 0;
70}
71
72static inline int
73init_link(struct nvbios_init *init)
74{
75 if (init->outp)
76 return !(init->outp->sorconf.link & 1);
77 error("script needs OR link\n");
78 return 0;
79}
80
81static inline int
82init_crtc(struct nvbios_init *init)
83{
84 if (init->crtc >= 0)
85 return init->crtc;
86 error("script needs crtc\n");
87 return 0;
88}
89
90static u8
91init_conn(struct nvbios_init *init)
92{
93 struct nouveau_bios *bios = init->bios;
94
95 if (init->outp) {
96 u8 ver, len;
97 u16 conn = dcb_conn(bios, init->outp->connector, &ver, &len);
98 if (conn)
99 return nv_ro08(bios, conn);
100 }
101
102 error("script needs connector type\n");
103 return 0x00;
104}
105
106static inline u32
107init_nvreg(struct nvbios_init *init, u32 reg)
108{
109 /* C51 (at least) sometimes has the lower bits set which the VBIOS
110 * interprets to mean that access needs to go through certain IO
111 * ports instead. The NVIDIA binary driver has been seen to access
112	 * these through the NV register address, so let's assume we can
113	 * do the same.
114 */
115 reg &= ~0x00000003;
116
117 /* GF8+ display scripts need register addresses mangled a bit to
118 * select a specific CRTC/OR
119 */
120 if (nv_device(init->bios)->card_type >= NV_50) {
121 if (reg & 0x80000000) {
122 reg += init_crtc(init) * 0x800;
123 reg &= ~0x80000000;
124 }
125
126 if (reg & 0x40000000) {
127 reg += init_or(init) * 0x800;
128 reg &= ~0x40000000;
129 if (reg & 0x20000000) {
130 reg += init_link(init) * 0x80;
131 reg &= ~0x20000000;
132 }
133 }
134 }
135
136 if (reg & ~0x00fffffc)
137 warn("unknown bits in register 0x%08x\n", reg);
138 return reg;
139}
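
/* A worked example of the mangling above, with illustrative values: on
 * an NV_50+ board, reg 0xc0616100 with init->crtc == 1 and an output on
 * OR 2 (so init_or() == 2) resolves as
 *
 *   0xc0616100 -> += 1 * 0x800, clear bit 31 -> 0x40616900
 *              -> += 2 * 0x800, clear bit 30 -> 0x00617900
 */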
140
141static u32
142init_rd32(struct nvbios_init *init, u32 reg)
143{
144 reg = init_nvreg(init, reg);
145 if (init_exec(init))
146 return nv_rd32(init->subdev, reg);
147 return 0x00000000;
148}
149
150static void
151init_wr32(struct nvbios_init *init, u32 reg, u32 val)
152{
153 reg = init_nvreg(init, reg);
154 if (init_exec(init))
155 nv_wr32(init->subdev, reg, val);
156}
157
158static u32
159init_mask(struct nvbios_init *init, u32 reg, u32 mask, u32 val)
160{
161 reg = init_nvreg(init, reg);
162 if (init_exec(init)) {
163 u32 tmp = nv_rd32(init->subdev, reg);
164 nv_wr32(init->subdev, reg, (tmp & ~mask) | val);
165 return tmp;
166 }
167 return 0x00000000;
168}
169
170static u8
171init_rdport(struct nvbios_init *init, u16 port)
172{
173 if (init_exec(init))
174 return nv_rdport(init->subdev, init->crtc, port);
175 return 0x00;
176}
177
178static void
179init_wrport(struct nvbios_init *init, u16 port, u8 value)
180{
181 if (init_exec(init))
182 nv_wrport(init->subdev, init->crtc, port, value);
183}
184
185static u8
186init_rdvgai(struct nvbios_init *init, u16 port, u8 index)
187{
188 struct nouveau_subdev *subdev = init->subdev;
189 if (init_exec(init)) {
190 int head = init->crtc < 0 ? 0 : init->crtc;
191 return nv_rdvgai(subdev, head, port, index);
192 }
193 return 0x00;
194}
195
196static void
197init_wrvgai(struct nvbios_init *init, u16 port, u8 index, u8 value)
198{
199 /* force head 0 for updates to cr44, it only exists on first head */
200 if (nv_device(init->subdev)->card_type < NV_50) {
201 if (port == 0x03d4 && index == 0x44)
202 init->crtc = 0;
203 }
204
205 if (init_exec(init)) {
206 int head = init->crtc < 0 ? 0 : init->crtc;
207 nv_wrvgai(init->subdev, head, port, index, value);
208 }
209
210 /* select head 1 if cr44 write selected it */
211 if (nv_device(init->subdev)->card_type < NV_50) {
212 if (port == 0x03d4 && index == 0x44 && value == 3)
213 init->crtc = 1;
214 }
215}
216
217static struct nouveau_i2c_port *
218init_i2c(struct nvbios_init *init, int index)
219{
220 struct nouveau_i2c *i2c = nouveau_i2c(init->bios);
221
222 if (index == 0xff) {
223 index = NV_I2C_DEFAULT(0);
224 if (init->outp && init->outp->i2c_upper_default)
225 index = NV_I2C_DEFAULT(1);
226 } else
227 if (index < 0) {
228 if (!init->outp) {
229 error("script needs output for i2c\n");
230 return NULL;
231 }
232
233 index = init->outp->i2c_index;
234 }
235
236 return i2c->find(i2c, index);
237}
238
239static int
240init_rdi2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg)
241{
242 struct nouveau_i2c_port *port = init_i2c(init, index);
243 if (port && init_exec(init))
244 return nv_rdi2cr(port, addr, reg);
245 return -ENODEV;
246}
247
248static int
249init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
250{
251 struct nouveau_i2c_port *port = init_i2c(init, index);
252 if (port && init_exec(init))
253 return nv_wri2cr(port, addr, reg, val);
254 return -ENODEV;
255}
256
257static int
258init_rdauxr(struct nvbios_init *init, u32 addr)
259{
260 struct nouveau_i2c_port *port = init_i2c(init, -1);
261 u8 data;
262
263 if (port && init_exec(init)) {
264 int ret = nv_rdaux(port, addr, &data, 1);
265 if (ret)
266 return ret;
267 return data;
268 }
269
270 return -ENODEV;
271}
272
273static int
274init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
275{
276 struct nouveau_i2c_port *port = init_i2c(init, -1);
277 if (port && init_exec(init))
278 return nv_wraux(port, addr, &data, 1);
279 return -ENODEV;
280}
281
282static void
283init_prog_pll(struct nvbios_init *init, u32 id, u32 freq)
284{
285 struct nouveau_clock *clk = nouveau_clock(init->bios);
286 if (clk && clk->pll_set && init_exec(init)) {
287 int ret = clk->pll_set(clk, id, freq);
288 if (ret)
289 warn("failed to prog pll 0x%08x to %dkHz\n", id, freq);
290 }
291}
292
293/******************************************************************************
294 * parsing of bios structures that are required to execute init tables
295 *****************************************************************************/
296
297static u16
298init_table(struct nouveau_bios *bios, u16 *len)
299{
300 struct bit_entry bit_I;
301
302 if (!bit_entry(bios, 'I', &bit_I)) {
303 *len = bit_I.length;
304 return bit_I.offset;
305 }
306
307 if (bmp_version(bios) >= 0x0510) {
308 *len = 14;
309 return bios->bmp_offset + 75;
310 }
311
312 return 0x0000;
313}
314
315static u16
316init_table_(struct nvbios_init *init, u16 offset, const char *name)
317{
318 struct nouveau_bios *bios = init->bios;
319 u16 len, data = init_table(bios, &len);
320 if (data) {
321 if (len >= offset + 2) {
322 data = nv_ro16(bios, data + offset);
323 if (data)
324 return data;
325
326 warn("%s pointer invalid\n", name);
327 return 0x0000;
328 }
329
330		warn("init data too short for %s pointer\n", name);
331 return 0x0000;
332 }
333
334 warn("init data not found\n");
335 return 0x0000;
336}
337
338#define init_script_table(b) init_table_((b), 0x00, "script table")
339#define init_macro_index_table(b) init_table_((b), 0x02, "macro index table")
340#define init_macro_table(b) init_table_((b), 0x04, "macro table")
341#define init_condition_table(b) init_table_((b), 0x06, "condition table")
342#define init_io_condition_table(b) init_table_((b), 0x08, "io condition table")
343#define init_io_flag_condition_table(b) init_table_((b), 0x0a, "io flag condition table")
344#define init_function_table(b) init_table_((b), 0x0c, "function table")
345#define init_xlat_table(b) init_table_((b), 0x10, "xlat table")
346
347static u16
348init_script(struct nouveau_bios *bios, int index)
349{
350 struct nvbios_init init = { .bios = bios };
351 u16 data;
352
353 if (bmp_version(bios) && bmp_version(bios) < 0x0510) {
354 if (index > 1)
355 return 0x0000;
356
357 data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18);
358 return nv_ro16(bios, data + (index * 2));
359 }
360
361 data = init_script_table(&init);
362 if (data)
363 return nv_ro16(bios, data + (index * 2));
364
365 return 0x0000;
366}
367
368static u16
369init_unknown_script(struct nouveau_bios *bios)
370{
371 u16 len, data = init_table(bios, &len);
372 if (data && len >= 16)
373 return nv_ro16(bios, data + 14);
374 return 0x0000;
375}
376
377static u16
378init_ram_restrict_table(struct nvbios_init *init)
379{
380 struct nouveau_bios *bios = init->bios;
381 struct bit_entry bit_M;
382 u16 data = 0x0000;
383
384 if (!bit_entry(bios, 'M', &bit_M)) {
385 if (bit_M.version == 1 && bit_M.length >= 5)
386 data = nv_ro16(bios, bit_M.offset + 3);
387 if (bit_M.version == 2 && bit_M.length >= 3)
388 data = nv_ro16(bios, bit_M.offset + 1);
389 }
390
391 if (data == 0x0000)
392 warn("ram restrict table not found\n");
393 return data;
394}
395
396static u8
397init_ram_restrict_group_count(struct nvbios_init *init)
398{
399 struct nouveau_bios *bios = init->bios;
400 struct bit_entry bit_M;
401
402 if (!bit_entry(bios, 'M', &bit_M)) {
403 if (bit_M.version == 1 && bit_M.length >= 5)
404 return nv_ro08(bios, bit_M.offset + 2);
405 if (bit_M.version == 2 && bit_M.length >= 3)
406 return nv_ro08(bios, bit_M.offset + 0);
407 }
408
409 return 0x00;
410}
411
412static u8
413init_ram_restrict(struct nvbios_init *init)
414{
415 u32 strap = (init_rd32(init, 0x101000) & 0x0000003c) >> 2;
416 u16 table = init_ram_restrict_table(init);
417 if (table)
418 return nv_ro08(init->bios, table + strap);
419 return 0x00;
420}
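
/* A sketch of how the pieces above fit together: the four strap bits
 * (0x101000[5:2]) index the ram restrict table, whose byte entries are
 * group numbers; opcodes such as INIT_RAM_RESTRICT_ZM_REG_GROUP then
 * use that group number to pick one value out of each per-register
 * list of init_ram_restrict_group_count() candidates.
 */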
421
422static u8
423init_xlat_(struct nvbios_init *init, u8 index, u8 offset)
424{
425 struct nouveau_bios *bios = init->bios;
426 u16 table = init_xlat_table(init);
427 if (table) {
428 u16 data = nv_ro16(bios, table + (index * 2));
429 if (data)
430 return nv_ro08(bios, data + offset);
431 warn("xlat table pointer %d invalid\n", index);
432 }
433 return 0x00;
434}
435
436/******************************************************************************
437 * utility functions used by various init opcode handlers
438 *****************************************************************************/
439
440static bool
441init_condition_met(struct nvbios_init *init, u8 cond)
442{
443 struct nouveau_bios *bios = init->bios;
444 u16 table = init_condition_table(init);
445 if (table) {
446 u32 reg = nv_ro32(bios, table + (cond * 12) + 0);
447 u32 msk = nv_ro32(bios, table + (cond * 12) + 4);
448 u32 val = nv_ro32(bios, table + (cond * 12) + 8);
449 trace("\t[0x%02x] (R[0x%06x] & 0x%08x) == 0x%08x\n",
450 cond, reg, msk, val);
451 return (init_rd32(init, reg) & msk) == val;
452 }
453 return false;
454}
455
456static bool
457init_io_condition_met(struct nvbios_init *init, u8 cond)
458{
459 struct nouveau_bios *bios = init->bios;
460 u16 table = init_io_condition_table(init);
461 if (table) {
462 u16 port = nv_ro16(bios, table + (cond * 5) + 0);
463 u8 index = nv_ro08(bios, table + (cond * 5) + 2);
464 u8 mask = nv_ro08(bios, table + (cond * 5) + 3);
465 u8 value = nv_ro08(bios, table + (cond * 5) + 4);
466 trace("\t[0x%02x] (0x%04x[0x%02x] & 0x%02x) == 0x%02x\n",
467 cond, port, index, mask, value);
468 return (init_rdvgai(init, port, index) & mask) == value;
469 }
470 return false;
471}
472
473static bool
474init_io_flag_condition_met(struct nvbios_init *init, u8 cond)
475{
476 struct nouveau_bios *bios = init->bios;
477 u16 table = init_io_flag_condition_table(init);
478 if (table) {
479 u16 port = nv_ro16(bios, table + (cond * 9) + 0);
480 u8 index = nv_ro08(bios, table + (cond * 9) + 2);
481 u8 mask = nv_ro08(bios, table + (cond * 9) + 3);
482 u8 shift = nv_ro08(bios, table + (cond * 9) + 4);
483 u16 data = nv_ro16(bios, table + (cond * 9) + 5);
484 u8 dmask = nv_ro08(bios, table + (cond * 9) + 7);
485 u8 value = nv_ro08(bios, table + (cond * 9) + 8);
486 u8 ioval = (init_rdvgai(init, port, index) & mask) >> shift;
487 return (nv_ro08(bios, data + ioval) & dmask) == value;
488 }
489 return false;
490}
491
492static inline u32
493init_shift(u32 data, u8 shift)
494{
495 if (shift < 0x80)
496 return data >> shift;
497 return data << (0x100 - shift);
498}
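
/* Worked examples of the shift encoding several opcodes share: the
 * byte behaves like a signed shift count, right for values below 0x80
 * and left by (0x100 - shift) otherwise:
 *
 *   init_shift(0x00001234, 0x04) == 0x00000123    (>> 4)
 *   init_shift(0x00001234, 0xfc) == 0x00012340    (<< 4)
 */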
499
500static u32
501init_tmds_reg(struct nvbios_init *init, u8 tmds)
502{
503 /* For mlv < 0x80, it is an index into a table of TMDS base addresses.
504 * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
505 * CR58 for CR57 = 0 to index a table of offsets to the basic
506 * 0x6808b0 address.
507 * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
508 * CR58 for CR57 = 0 to index a table of offsets to the basic
509 * 0x6808b0 address, and then flip the offset by 8.
510 */
511
512 const int pramdac_offset[13] = {
513 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
514 const u32 pramdac_table[4] = {
515 0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };
516
517 if (tmds >= 0x80) {
518 if (init->outp) {
519 u32 dacoffset = pramdac_offset[init->outp->or];
520 if (tmds == 0x81)
521 dacoffset ^= 8;
522 return 0x6808b0 + dacoffset;
523 }
524
525 error("tmds opcodes need dcb\n");
526 } else {
527 if (tmds < ARRAY_SIZE(pramdac_table))
528 return pramdac_table[tmds];
529
530 error("tmds selector 0x%02x unknown\n", tmds);
531 }
532
533 return 0;
534}
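
/* Worked examples of the selector above, using the tables as defined:
 *
 *   tmds 0x01              -> pramdac_table[1]             == 0x6808b8
 *   tmds 0x80, outp->or 4  -> 0x6808b0 + pramdac_offset[4] == 0x6828b0
 *   tmds 0x81, outp->or 4  -> offset 0x2000 ^ 8            -> 0x6828b8
 */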
535
536/******************************************************************************
537 * init opcode handlers
538 *****************************************************************************/
539
540/**
541 * init_reserved - stub for various unknown/unused single-byte opcodes
542 *
543 */
544static void
545init_reserved(struct nvbios_init *init)
546{
547 u8 opcode = nv_ro08(init->bios, init->offset);
548 trace("RESERVED\t0x%02x\n", opcode);
549 init->offset += 1;
550}
551
552/**
553 * INIT_DONE - opcode 0x71
554 *
555 */
556static void
557init_done(struct nvbios_init *init)
558{
559 trace("DONE\n");
560 init->offset = 0x0000;
561}
562
563/**
564 * INIT_IO_RESTRICT_PROG - opcode 0x32
565 *
566 */
567static void
568init_io_restrict_prog(struct nvbios_init *init)
569{
570 struct nouveau_bios *bios = init->bios;
571 u16 port = nv_ro16(bios, init->offset + 1);
572 u8 index = nv_ro08(bios, init->offset + 3);
573 u8 mask = nv_ro08(bios, init->offset + 4);
574 u8 shift = nv_ro08(bios, init->offset + 5);
575 u8 count = nv_ro08(bios, init->offset + 6);
576 u32 reg = nv_ro32(bios, init->offset + 7);
577 u8 conf, i;
578
579 trace("IO_RESTRICT_PROG\tR[0x%06x] = "
580 "((0x%04x[0x%02x] & 0x%02x) >> %d) [{\n",
581 reg, port, index, mask, shift);
582 init->offset += 11;
583
584 conf = (init_rdvgai(init, port, index) & mask) >> shift;
585 for (i = 0; i < count; i++) {
586 u32 data = nv_ro32(bios, init->offset);
587
588 if (i == conf) {
589 trace("\t0x%08x *\n", data);
590 init_wr32(init, reg, data);
591 } else {
592 trace("\t0x%08x\n", data);
593 }
594
595 init->offset += 4;
596 }
597 trace("}]\n");
598}
599
600/**
601 * INIT_REPEAT - opcode 0x33
602 *
603 */
604static void
605init_repeat(struct nvbios_init *init)
606{
607 struct nouveau_bios *bios = init->bios;
608 u8 count = nv_ro08(bios, init->offset + 1);
609 u16 repeat = init->repeat;
610
611 trace("REPEAT\t0x%02x\n", count);
612 init->offset += 2;
613
614 init->repeat = init->offset;
615 init->repend = init->offset;
616 while (count--) {
617 init->offset = init->repeat;
618 nvbios_exec(init);
619 if (count)
620 trace("REPEAT\t0x%02x\n", count);
621 }
622 init->offset = init->repend;
623 init->repeat = repeat;
624}
625
626/**
627 * INIT_IO_RESTRICT_PLL - opcode 0x34
628 *
629 */
630static void
631init_io_restrict_pll(struct nvbios_init *init)
632{
633 struct nouveau_bios *bios = init->bios;
634 u16 port = nv_ro16(bios, init->offset + 1);
635 u8 index = nv_ro08(bios, init->offset + 3);
636 u8 mask = nv_ro08(bios, init->offset + 4);
637 u8 shift = nv_ro08(bios, init->offset + 5);
638 s8 iofc = nv_ro08(bios, init->offset + 6);
639 u8 count = nv_ro08(bios, init->offset + 7);
640 u32 reg = nv_ro32(bios, init->offset + 8);
641 u8 conf, i;
642
643 trace("IO_RESTRICT_PLL\tR[0x%06x] =PLL= "
644 "((0x%04x[0x%02x] & 0x%02x) >> 0x%02x) IOFCOND 0x%02x [{\n",
645 reg, port, index, mask, shift, iofc);
646 init->offset += 12;
647
648 conf = (init_rdvgai(init, port, index) & mask) >> shift;
649 for (i = 0; i < count; i++) {
650 u32 freq = nv_ro16(bios, init->offset) * 10;
651
652 if (i == conf) {
653 trace("\t%dkHz *\n", freq);
654 if (iofc > 0 && init_io_flag_condition_met(init, iofc))
655 freq *= 2;
656 init_prog_pll(init, reg, freq);
657 } else {
658 trace("\t%dkHz\n", freq);
659 }
660
661 init->offset += 2;
662 }
663 trace("}]\n");
664}
665
666/**
667 * INIT_END_REPEAT - opcode 0x36
668 *
669 */
670static void
671init_end_repeat(struct nvbios_init *init)
672{
673 trace("END_REPEAT\n");
674 init->offset += 1;
675
676 if (init->repeat) {
677 init->repend = init->offset;
678 init->offset = 0;
679 }
680}
681
682/**
683 * INIT_COPY - opcode 0x37
684 *
685 */
686static void
687init_copy(struct nvbios_init *init)
688{
689 struct nouveau_bios *bios = init->bios;
690 u32 reg = nv_ro32(bios, init->offset + 1);
691 u8 shift = nv_ro08(bios, init->offset + 5);
692 u8 smask = nv_ro08(bios, init->offset + 6);
693 u16 port = nv_ro16(bios, init->offset + 7);
694 u8 index = nv_ro08(bios, init->offset + 9);
695 u8 mask = nv_ro08(bios, init->offset + 10);
696 u8 data;
697
698 trace("COPY\t0x%04x[0x%02x] &= 0x%02x |= "
699 "((R[0x%06x] %s 0x%02x) & 0x%02x)\n",
700 port, index, mask, reg, (shift & 0x80) ? "<<" : ">>",
701 (shift & 0x80) ? (0x100 - shift) : shift, smask);
702 init->offset += 11;
703
704 data = init_rdvgai(init, port, index) & mask;
705 data |= init_shift(init_rd32(init, reg), shift) & smask;
706 init_wrvgai(init, port, index, data);
707}
708
709/**
710 * INIT_NOT - opcode 0x38
711 *
712 */
713static void
714init_not(struct nvbios_init *init)
715{
716 trace("NOT\n");
717 init->offset += 1;
718 init_exec_inv(init);
719}
720
721/**
722 * INIT_IO_FLAG_CONDITION - opcode 0x39
723 *
724 */
725static void
726init_io_flag_condition(struct nvbios_init *init)
727{
728 struct nouveau_bios *bios = init->bios;
729 u8 cond = nv_ro08(bios, init->offset + 1);
730
731 trace("IO_FLAG_CONDITION\t0x%02x\n", cond);
732 init->offset += 2;
733
734 if (!init_io_flag_condition_met(init, cond))
735 init_exec_set(init, false);
736}
737
738/**
739 * INIT_DP_CONDITION - opcode 0x3a
740 *
741 */
742static void
743init_dp_condition(struct nvbios_init *init)
744{
745 struct nouveau_bios *bios = init->bios;
746 u8 cond = nv_ro08(bios, init->offset + 1);
747 u8 unkn = nv_ro08(bios, init->offset + 2);
748 u8 ver, len;
749 u16 data;
750
751 trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
752 init->offset += 3;
753
754 switch (cond) {
755 case 0:
756 if (init_conn(init) != DCB_CONNECTOR_eDP)
757 init_exec_set(init, false);
758 break;
759 case 1:
760 case 2:
761 if ( init->outp &&
762 (data = dp_outp_match(bios, init->outp, &ver, &len))) {
763 if (ver <= 0x40 && !(nv_ro08(bios, data + 5) & cond))
764 init_exec_set(init, false);
765 if (ver == 0x40 && !(nv_ro08(bios, data + 4) & cond))
766 init_exec_set(init, false);
767 break;
768 }
769
770 warn("script needs dp output table data\n");
771 break;
772 case 5:
773 if (!(init_rdauxr(init, 0x0d) & 1))
774 init_exec_set(init, false);
775 break;
776 default:
777 warn("unknown dp condition 0x%02x\n", cond);
778 break;
779 }
780}
781
782/**
783 * INIT_IO_MASK_OR - opcode 0x3b
784 *
785 */
786static void
787init_io_mask_or(struct nvbios_init *init)
788{
789 struct nouveau_bios *bios = init->bios;
790 u8 index = nv_ro08(bios, init->offset + 1);
791 u8 or = init_or(init);
792 u8 data;
793
794	trace("IO_MASK_OR\t0x03d4[0x%02x] &= ~(1 << 0x%02x)\n", index, or);
795 init->offset += 2;
796
797 data = init_rdvgai(init, 0x03d4, index);
798	init_wrvgai(init, 0x03d4, index, data & ~(1 << or));
799}
800
801/**
802 * INIT_IO_OR - opcode 0x3c
803 *
804 */
805static void
806init_io_or(struct nvbios_init *init)
807{
808 struct nouveau_bios *bios = init->bios;
809 u8 index = nv_ro08(bios, init->offset + 1);
810 u8 or = init_or(init);
811 u8 data;
812
813	trace("IO_OR\t0x03d4[0x%02x] |= (1 << 0x%02x)\n", index, or);
814 init->offset += 2;
815
816 data = init_rdvgai(init, 0x03d4, index);
817 init_wrvgai(init, 0x03d4, index, data | (1 << or));
818}
819
820/**
821 * INIT_INDEX_ADDRESS_LATCHED - opcode 0x49
822 *
823 */
824static void
825init_idx_addr_latched(struct nvbios_init *init)
826{
827 struct nouveau_bios *bios = init->bios;
828 u32 creg = nv_ro32(bios, init->offset + 1);
829 u32 dreg = nv_ro32(bios, init->offset + 5);
830 u32 mask = nv_ro32(bios, init->offset + 9);
831 u32 data = nv_ro32(bios, init->offset + 13);
832 u8 count = nv_ro08(bios, init->offset + 17);
833
834 trace("INDEX_ADDRESS_LATCHED\t"
835 "R[0x%06x] : R[0x%06x]\n\tCTRL &= 0x%08x |= 0x%08x\n",
836 creg, dreg, mask, data);
837 init->offset += 18;
838
839 while (count--) {
840 u8 iaddr = nv_ro08(bios, init->offset + 0);
841 u8 idata = nv_ro08(bios, init->offset + 1);
842
843 trace("\t[0x%02x] = 0x%02x\n", iaddr, idata);
844 init->offset += 2;
845
846 init_wr32(init, dreg, idata);
847		init_mask(init, creg, ~mask, data | iaddr);
848 }
849}
850
851/**
852 * INIT_IO_RESTRICT_PLL2 - opcode 0x4a
853 *
854 */
855static void
856init_io_restrict_pll2(struct nvbios_init *init)
857{
858 struct nouveau_bios *bios = init->bios;
859 u16 port = nv_ro16(bios, init->offset + 1);
860 u8 index = nv_ro08(bios, init->offset + 3);
861 u8 mask = nv_ro08(bios, init->offset + 4);
862 u8 shift = nv_ro08(bios, init->offset + 5);
863 u8 count = nv_ro08(bios, init->offset + 6);
864 u32 reg = nv_ro32(bios, init->offset + 7);
865 u8 conf, i;
866
867 trace("IO_RESTRICT_PLL2\t"
868 "R[0x%06x] =PLL= ((0x%04x[0x%02x] & 0x%02x) >> 0x%02x) [{\n",
869 reg, port, index, mask, shift);
870 init->offset += 11;
871
872 conf = (init_rdvgai(init, port, index) & mask) >> shift;
873 for (i = 0; i < count; i++) {
874 u32 freq = nv_ro32(bios, init->offset);
875 if (i == conf) {
876 trace("\t%dkHz *\n", freq);
877 init_prog_pll(init, reg, freq);
878 } else {
879 trace("\t%dkHz\n", freq);
880 }
881 init->offset += 4;
882 }
883 trace("}]\n");
884}
885
886/**
887 * INIT_PLL2 - opcode 0x4b
888 *
889 */
890static void
891init_pll2(struct nvbios_init *init)
892{
893 struct nouveau_bios *bios = init->bios;
894 u32 reg = nv_ro32(bios, init->offset + 1);
895 u32 freq = nv_ro32(bios, init->offset + 5);
896
897 trace("PLL2\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
898 init->offset += 9;
899
900 init_prog_pll(init, reg, freq);
901}
902
903/**
904 * INIT_I2C_BYTE - opcode 0x4c
905 *
906 */
907static void
908init_i2c_byte(struct nvbios_init *init)
909{
910 struct nouveau_bios *bios = init->bios;
911 u8 index = nv_ro08(bios, init->offset + 1);
912 u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
913 u8 count = nv_ro08(bios, init->offset + 3);
914
915 trace("I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
916 init->offset += 4;
917
918 while (count--) {
919 u8 reg = nv_ro08(bios, init->offset + 0);
920 u8 mask = nv_ro08(bios, init->offset + 1);
921 u8 data = nv_ro08(bios, init->offset + 2);
922 int val;
923
924 trace("\t[0x%02x] &= 0x%02x |= 0x%02x\n", reg, mask, data);
925 init->offset += 3;
926
927 val = init_rdi2cr(init, index, addr, reg);
928 if (val < 0)
929 continue;
930 init_wri2cr(init, index, addr, reg, (val & mask) | data);
931 }
932}
933
934/**
935 * INIT_ZM_I2C_BYTE - opcode 0x4d
936 *
937 */
938static void
939init_zm_i2c_byte(struct nvbios_init *init)
940{
941 struct nouveau_bios *bios = init->bios;
942 u8 index = nv_ro08(bios, init->offset + 1);
943 u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
944 u8 count = nv_ro08(bios, init->offset + 3);
945
946 trace("ZM_I2C_BYTE\tI2C[0x%02x][0x%02x]\n", index, addr);
947 init->offset += 4;
948
949 while (count--) {
950 u8 reg = nv_ro08(bios, init->offset + 0);
951 u8 data = nv_ro08(bios, init->offset + 1);
952
953 trace("\t[0x%02x] = 0x%02x\n", reg, data);
954 init->offset += 2;
955
956 init_wri2cr(init, index, addr, reg, data);
957 }
958
959}
960
961/**
962 * INIT_ZM_I2C - opcode 0x4e
963 *
964 */
965static void
966init_zm_i2c(struct nvbios_init *init)
967{
968 struct nouveau_bios *bios = init->bios;
969 u8 index = nv_ro08(bios, init->offset + 1);
970 u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
971 u8 count = nv_ro08(bios, init->offset + 3);
972 u8 data[256], i;
973
974 trace("ZM_I2C\tI2C[0x%02x][0x%02x]\n", index, addr);
975 init->offset += 4;
976
977 for (i = 0; i < count; i++) {
978 data[i] = nv_ro08(bios, init->offset);
979 trace("\t0x%02x\n", data[i]);
980 init->offset++;
981 }
982
983 if (init_exec(init)) {
984 struct nouveau_i2c_port *port = init_i2c(init, index);
985 struct i2c_msg msg = {
986 .addr = addr, .flags = 0, .len = count, .buf = data,
987 };
988 int ret;
989
990 if (port && (ret = i2c_transfer(&port->adapter, &msg, 1)) != 1)
991 warn("i2c wr failed, %d\n", ret);
992 }
993}
994
995/**
996 * INIT_TMDS - opcode 0x4f
997 *
998 */
999static void
1000init_tmds(struct nvbios_init *init)
1001{
1002 struct nouveau_bios *bios = init->bios;
1003 u8 tmds = nv_ro08(bios, init->offset + 1);
1004 u8 addr = nv_ro08(bios, init->offset + 2);
1005 u8 mask = nv_ro08(bios, init->offset + 3);
1006 u8 data = nv_ro08(bios, init->offset + 4);
1007 u32 reg = init_tmds_reg(init, tmds);
1008
1009 trace("TMDS\tT[0x%02x][0x%02x] &= 0x%02x |= 0x%02x\n",
1010 tmds, addr, mask, data);
1011 init->offset += 5;
1012
1013 if (reg == 0)
1014 return;
1015
1016 init_wr32(init, reg + 0, addr | 0x00010000);
1017 init_wr32(init, reg + 4, data | (init_rd32(init, reg + 4) & mask));
1018 init_wr32(init, reg + 0, addr);
1019}
1020
1021/**
1022 * INIT_ZM_TMDS_GROUP - opcode 0x50
1023 *
1024 */
1025static void
1026init_zm_tmds_group(struct nvbios_init *init)
1027{
1028 struct nouveau_bios *bios = init->bios;
1029 u8 tmds = nv_ro08(bios, init->offset + 1);
1030 u8 count = nv_ro08(bios, init->offset + 2);
1031 u32 reg = init_tmds_reg(init, tmds);
1032
1033 trace("TMDS_ZM_GROUP\tT[0x%02x]\n", tmds);
1034 init->offset += 3;
1035
1036 while (count--) {
1037 u8 addr = nv_ro08(bios, init->offset + 0);
1038 u8 data = nv_ro08(bios, init->offset + 1);
1039
1040 trace("\t[0x%02x] = 0x%02x\n", addr, data);
1041 init->offset += 2;
1042
1043 init_wr32(init, reg + 4, data);
1044 init_wr32(init, reg + 0, addr);
1045 }
1046}
1047
1048/**
1049 * INIT_CR_INDEX_ADDRESS_LATCHED - opcode 0x51
1050 *
1051 */
1052static void
1053init_cr_idx_adr_latch(struct nvbios_init *init)
1054{
1055 struct nouveau_bios *bios = init->bios;
1056 u8 addr0 = nv_ro08(bios, init->offset + 1);
1057 u8 addr1 = nv_ro08(bios, init->offset + 2);
1058 u8 base = nv_ro08(bios, init->offset + 3);
1059 u8 count = nv_ro08(bios, init->offset + 4);
1060 u8 save0;
1061
1062	trace("CR_INDEX_ADDR C[0x%02x] C[0x%02x]\n", addr0, addr1);
1063 init->offset += 5;
1064
1065 save0 = init_rdvgai(init, 0x03d4, addr0);
1066 while (count--) {
1067 u8 data = nv_ro08(bios, init->offset);
1068
1069 trace("\t\t[0x%02x] = 0x%02x\n", base, data);
1070 init->offset += 1;
1071
1072 init_wrvgai(init, 0x03d4, addr0, base++);
1073 init_wrvgai(init, 0x03d4, addr1, data);
1074 }
1075 init_wrvgai(init, 0x03d4, addr0, save0);
1076}
1077
1078/**
1079 * INIT_CR - opcode 0x52
1080 *
1081 */
1082static void
1083init_cr(struct nvbios_init *init)
1084{
1085 struct nouveau_bios *bios = init->bios;
1086 u8 addr = nv_ro08(bios, init->offset + 1);
1087 u8 mask = nv_ro08(bios, init->offset + 2);
1088 u8 data = nv_ro08(bios, init->offset + 3);
1089 u8 val;
1090
1091 trace("CR\t\tC[0x%02x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
1092 init->offset += 4;
1093
1094 val = init_rdvgai(init, 0x03d4, addr) & mask;
1095 init_wrvgai(init, 0x03d4, addr, val | data);
1096}
1097
1098/**
1099 * INIT_ZM_CR - opcode 0x53
1100 *
1101 */
1102static void
1103init_zm_cr(struct nvbios_init *init)
1104{
1105 struct nouveau_bios *bios = init->bios;
1106 u8 addr = nv_ro08(bios, init->offset + 1);
1107 u8 data = nv_ro08(bios, init->offset + 2);
1108
1109 trace("ZM_CR\tC[0x%02x] = 0x%02x\n", addr, data);
1110 init->offset += 3;
1111
1112 init_wrvgai(init, 0x03d4, addr, data);
1113}
1114
1115/**
1116 * INIT_ZM_CR_GROUP - opcode 0x54
1117 *
1118 */
1119static void
1120init_zm_cr_group(struct nvbios_init *init)
1121{
1122 struct nouveau_bios *bios = init->bios;
1123 u8 count = nv_ro08(bios, init->offset + 1);
1124
1125 trace("ZM_CR_GROUP\n");
1126 init->offset += 2;
1127
1128 while (count--) {
1129 u8 addr = nv_ro08(bios, init->offset + 0);
1130 u8 data = nv_ro08(bios, init->offset + 1);
1131
1132 trace("\t\tC[0x%02x] = 0x%02x\n", addr, data);
1133 init->offset += 2;
1134
1135 init_wrvgai(init, 0x03d4, addr, data);
1136 }
1137}
1138
1139/**
1140 * INIT_CONDITION_TIME - opcode 0x56
1141 *
1142 */
1143static void
1144init_condition_time(struct nvbios_init *init)
1145{
1146 struct nouveau_bios *bios = init->bios;
1147 u8 cond = nv_ro08(bios, init->offset + 1);
1148 u8 retry = nv_ro08(bios, init->offset + 2);
1149 u8 wait = min((u16)retry * 50, 100);
1150
1151 trace("CONDITION_TIME\t0x%02x 0x%02x\n", cond, retry);
1152 init->offset += 3;
1153
1154 if (!init_exec(init))
1155 return;
1156
1157 while (wait--) {
1158 if (init_condition_met(init, cond))
1159 return;
1160 mdelay(20);
1161 }
1162
1163 init_exec_set(init, false);
1164}
1165
1166/**
1167 * INIT_LTIME - opcode 0x57
1168 *
1169 */
1170static void
1171init_ltime(struct nvbios_init *init)
1172{
1173 struct nouveau_bios *bios = init->bios;
1174 u16 msec = nv_ro16(bios, init->offset + 1);
1175
1176 trace("LTIME\t0x%04x\n", msec);
1177 init->offset += 3;
1178
1179 if (init_exec(init))
1180 mdelay(msec);
1181}
1182
1183/**
1184 * INIT_ZM_REG_SEQUENCE - opcode 0x58
1185 *
1186 */
1187static void
1188init_zm_reg_sequence(struct nvbios_init *init)
1189{
1190 struct nouveau_bios *bios = init->bios;
1191 u32 base = nv_ro32(bios, init->offset + 1);
1192 u8 count = nv_ro08(bios, init->offset + 5);
1193
1194 trace("ZM_REG_SEQUENCE\t0x%02x\n", count);
1195 init->offset += 6;
1196
1197 while (count--) {
1198 u32 data = nv_ro32(bios, init->offset);
1199
1200 trace("\t\tR[0x%06x] = 0x%08x\n", base, data);
1201 init->offset += 4;
1202
1203 init_wr32(init, base, data);
1204 base += 4;
1205 }
1206}
1207
1208/**
1209 * INIT_SUB_DIRECT - opcode 0x5b
1210 *
1211 */
1212static void
1213init_sub_direct(struct nvbios_init *init)
1214{
1215 struct nouveau_bios *bios = init->bios;
1216 u16 addr = nv_ro16(bios, init->offset + 1);
1217 u16 save;
1218
1219 trace("SUB_DIRECT\t0x%04x\n", addr);
1220
1221 if (init_exec(init)) {
1222 save = init->offset;
1223 init->offset = addr;
1224 if (nvbios_exec(init)) {
1225 error("error parsing sub-table\n");
1226 return;
1227 }
1228 init->offset = save;
1229 }
1230
1231 init->offset += 3;
1232}
1233
1234/**
1235 * INIT_JUMP - opcode 0x5c
1236 *
1237 */
1238static void
1239init_jump(struct nvbios_init *init)
1240{
1241 struct nouveau_bios *bios = init->bios;
1242 u16 offset = nv_ro16(bios, init->offset + 1);
1243
1244 trace("JUMP\t0x%04x\n", offset);
1245 init->offset = offset;
1246}
1247
1248/**
1249 * INIT_I2C_IF - opcode 0x5e
1250 *
1251 */
1252static void
1253init_i2c_if(struct nvbios_init *init)
1254{
1255 struct nouveau_bios *bios = init->bios;
1256 u8 index = nv_ro08(bios, init->offset + 1);
1257 u8 addr = nv_ro08(bios, init->offset + 2);
1258 u8 reg = nv_ro08(bios, init->offset + 3);
1259 u8 mask = nv_ro08(bios, init->offset + 4);
1260 u8 data = nv_ro08(bios, init->offset + 5);
1261 u8 value;
1262
1263 trace("I2C_IF\tI2C[0x%02x][0x%02x][0x%02x] & 0x%02x == 0x%02x\n",
1264 index, addr, reg, mask, data);
1265 init->offset += 6;
1266 init_exec_force(init, true);
1267
1268 value = init_rdi2cr(init, index, addr, reg);
1269 if ((value & mask) != data)
1270 init_exec_set(init, false);
1271
1272 init_exec_force(init, false);
1273}
1274
1275/**
1276 * INIT_COPY_NV_REG - opcode 0x5f
1277 *
1278 */
1279static void
1280init_copy_nv_reg(struct nvbios_init *init)
1281{
1282 struct nouveau_bios *bios = init->bios;
1283 u32 sreg = nv_ro32(bios, init->offset + 1);
1284 u8 shift = nv_ro08(bios, init->offset + 5);
1285 u32 smask = nv_ro32(bios, init->offset + 6);
1286 u32 sxor = nv_ro32(bios, init->offset + 10);
1287 u32 dreg = nv_ro32(bios, init->offset + 14);
1288 u32 dmask = nv_ro32(bios, init->offset + 18);
1289 u32 data;
1290
1291 trace("COPY_NV_REG\tR[0x%06x] &= 0x%08x |= "
1292 "((R[0x%06x] %s 0x%02x) & 0x%08x ^ 0x%08x)\n",
1293 dreg, dmask, sreg, (shift & 0x80) ? "<<" : ">>",
1294 (shift & 0x80) ? (0x100 - shift) : shift, smask, sxor);
1295 init->offset += 22;
1296
1297 data = init_shift(init_rd32(init, sreg), shift);
1298 init_mask(init, dreg, ~dmask, (data & smask) ^ sxor);
1299}
1300
1301/**
1302 * INIT_ZM_INDEX_IO - opcode 0x62
1303 *
1304 */
1305static void
1306init_zm_index_io(struct nvbios_init *init)
1307{
1308 struct nouveau_bios *bios = init->bios;
1309 u16 port = nv_ro16(bios, init->offset + 1);
1310 u8 index = nv_ro08(bios, init->offset + 3);
1311 u8 data = nv_ro08(bios, init->offset + 4);
1312
1313 trace("ZM_INDEX_IO\tI[0x%04x][0x%02x] = 0x%02x\n", port, index, data);
1314 init->offset += 5;
1315
1316 init_wrvgai(init, port, index, data);
1317}
1318
1319/**
1320 * INIT_COMPUTE_MEM - opcode 0x63
1321 *
1322 */
1323static void
1324init_compute_mem(struct nvbios_init *init)
1325{
1326 struct nouveau_devinit *devinit = nouveau_devinit(init->bios);
1327
1328 trace("COMPUTE_MEM\n");
1329 init->offset += 1;
1330
1331 init_exec_force(init, true);
1332 if (init_exec(init) && devinit->meminit)
1333 devinit->meminit(devinit);
1334 init_exec_force(init, false);
1335}
1336
1337/**
1338 * INIT_RESET - opcode 0x65
1339 *
1340 */
1341static void
1342init_reset(struct nvbios_init *init)
1343{
1344 struct nouveau_bios *bios = init->bios;
1345 u32 reg = nv_ro32(bios, init->offset + 1);
1346 u32 data1 = nv_ro32(bios, init->offset + 5);
1347 u32 data2 = nv_ro32(bios, init->offset + 9);
1348 u32 savepci19;
1349
1350	trace("RESET\tR[0x%08x] = 0x%08x, 0x%08x\n", reg, data1, data2);
1351 init->offset += 13;
1352 init_exec_force(init, true);
1353
1354 savepci19 = init_mask(init, 0x00184c, 0x00000f00, 0x00000000);
1355 init_wr32(init, reg, data1);
1356 udelay(10);
1357 init_wr32(init, reg, data2);
1358 init_wr32(init, 0x00184c, savepci19);
1359 init_mask(init, 0x001850, 0x00000001, 0x00000000);
1360
1361 init_exec_force(init, false);
1362}
1363
1364/**
1365 * INIT_CONFIGURE_MEM - opcode 0x66
1366 *
1367 */
1368static u16
1369init_configure_mem_clk(struct nvbios_init *init)
1370{
1371 u16 mdata = bmp_mem_init_table(init->bios);
1372 if (mdata)
1373 mdata += (init_rdvgai(init, 0x03d4, 0x3c) >> 4) * 66;
1374 return mdata;
1375}
1376
1377static void
1378init_configure_mem(struct nvbios_init *init)
1379{
1380 struct nouveau_bios *bios = init->bios;
1381 u16 mdata, sdata;
1382 u32 addr, data;
1383
1384 trace("CONFIGURE_MEM\n");
1385 init->offset += 1;
1386
1387 if (bios->version.major > 2) {
1388 init_done(init);
1389 return;
1390 }
1391 init_exec_force(init, true);
1392
1393 mdata = init_configure_mem_clk(init);
1394 sdata = bmp_sdr_seq_table(bios);
1395 if (nv_ro08(bios, mdata) & 0x01)
1396 sdata = bmp_ddr_seq_table(bios);
1397 mdata += 6; /* skip to data */
1398
1399 data = init_rdvgai(init, 0x03c4, 0x01);
1400 init_wrvgai(init, 0x03c4, 0x01, data | 0x20);
1401
1402 while ((addr = nv_ro32(bios, sdata)) != 0xffffffff) {
1403 switch (addr) {
1404 case 0x10021c: /* CKE_NORMAL */
1405 case 0x1002d0: /* CMD_REFRESH */
1406 case 0x1002d4: /* CMD_PRECHARGE */
1407 data = 0x00000001;
1408 break;
1409 default:
1410 data = nv_ro32(bios, mdata);
1411 mdata += 4;
1412 if (data == 0xffffffff)
1413 continue;
1414 break;
1415 }
1416
1417 init_wr32(init, addr, data);
1418 }
1419
1420 init_exec_force(init, false);
1421}
1422
1423/**
1424 * INIT_CONFIGURE_CLK - opcode 0x67
1425 *
1426 */
1427static void
1428init_configure_clk(struct nvbios_init *init)
1429{
1430 struct nouveau_bios *bios = init->bios;
1431 u16 mdata, clock;
1432
1433 trace("CONFIGURE_CLK\n");
1434 init->offset += 1;
1435
1436 if (bios->version.major > 2) {
1437 init_done(init);
1438 return;
1439 }
1440 init_exec_force(init, true);
1441
1442 mdata = init_configure_mem_clk(init);
1443
1444 /* NVPLL */
1445 clock = nv_ro16(bios, mdata + 4) * 10;
1446 init_prog_pll(init, 0x680500, clock);
1447
1448 /* MPLL */
1449 clock = nv_ro16(bios, mdata + 2) * 10;
1450 if (nv_ro08(bios, mdata) & 0x01)
1451 clock *= 2;
1452 init_prog_pll(init, 0x680504, clock);
1453
1454 init_exec_force(init, false);
1455}
1456
1457/**
1458 * INIT_CONFIGURE_PREINIT - opcode 0x68
1459 *
1460 */
1461static void
1462init_configure_preinit(struct nvbios_init *init)
1463{
1464 struct nouveau_bios *bios = init->bios;
1465 u32 strap;
1466
1467 trace("CONFIGURE_PREINIT\n");
1468 init->offset += 1;
1469
1470 if (bios->version.major > 2) {
1471 init_done(init);
1472 return;
1473 }
1474 init_exec_force(init, true);
1475
1476 strap = init_rd32(init, 0x101000);
1477 strap = ((strap << 2) & 0xf0) | ((strap & 0x40) >> 6);
1478 init_wrvgai(init, 0x03d4, 0x3c, strap);
1479
1480 init_exec_force(init, false);
1481}
1482
1483/**
1484 * INIT_IO - opcode 0x69
1485 *
1486 */
1487static void
1488init_io(struct nvbios_init *init)
1489{
1490 struct nouveau_bios *bios = init->bios;
1491 u16 port = nv_ro16(bios, init->offset + 1);
1492	u8 mask = nv_ro08(bios, init->offset + 3);
1493	u8 data = nv_ro08(bios, init->offset + 4);
1494 u8 value;
1495
1496 trace("IO\t\tI[0x%04x] &= 0x%02x |= 0x%02x\n", port, mask, data);
1497 init->offset += 5;
1498
1499 /* ummm.. yes.. should really figure out wtf this is and why it's
1500 * needed some day.. it's almost certainly wrong, but, it also
1501 * somehow makes things work...
1502 */
1503 if (nv_device(init->bios)->card_type >= NV_50 &&
1504 port == 0x03c3 && data == 0x01) {
1505 init_mask(init, 0x614100, 0xf0800000, 0x00800000);
1506 init_mask(init, 0x00e18c, 0x00020000, 0x00020000);
1507 init_mask(init, 0x614900, 0xf0800000, 0x00800000);
1508 init_mask(init, 0x000200, 0x40000000, 0x00000000);
1509 mdelay(10);
1510 init_mask(init, 0x00e18c, 0x00020000, 0x00000000);
1511 init_mask(init, 0x000200, 0x40000000, 0x40000000);
1512 init_wr32(init, 0x614100, 0x00800018);
1513 init_wr32(init, 0x614900, 0x00800018);
1514 mdelay(10);
1515 init_wr32(init, 0x614100, 0x10000018);
1516 init_wr32(init, 0x614900, 0x10000018);
1517 return;
1518 }
1519
1520 value = init_rdport(init, port) & mask;
1521 init_wrport(init, port, data | value);
1522}
1523
1524/**
1525 * INIT_SUB - opcode 0x6b
1526 *
1527 */
1528static void
1529init_sub(struct nvbios_init *init)
1530{
1531 struct nouveau_bios *bios = init->bios;
1532 u8 index = nv_ro08(bios, init->offset + 1);
1533 u16 addr, save;
1534
1535 trace("SUB\t0x%02x\n", index);
1536
1537 addr = init_script(bios, index);
1538 if (addr && init_exec(init)) {
1539 save = init->offset;
1540 init->offset = addr;
1541 if (nvbios_exec(init)) {
1542 error("error parsing sub-table\n");
1543 return;
1544 }
1545 init->offset = save;
1546 }
1547
1548 init->offset += 2;
1549}
1550
1551/**
1552 * INIT_RAM_CONDITION - opcode 0x6d
1553 *
1554 */
1555static void
1556init_ram_condition(struct nvbios_init *init)
1557{
1558 struct nouveau_bios *bios = init->bios;
1559 u8 mask = nv_ro08(bios, init->offset + 1);
1560 u8 value = nv_ro08(bios, init->offset + 2);
1561
1562 trace("RAM_CONDITION\t"
1563 "(R[0x100000] & 0x%02x) == 0x%02x\n", mask, value);
1564 init->offset += 3;
1565
1566 if ((init_rd32(init, 0x100000) & mask) != value)
1567 init_exec_set(init, false);
1568}
1569
1570/**
1571 * INIT_NV_REG - opcode 0x6e
1572 *
1573 */
1574static void
1575init_nv_reg(struct nvbios_init *init)
1576{
1577 struct nouveau_bios *bios = init->bios;
1578 u32 reg = nv_ro32(bios, init->offset + 1);
1579 u32 mask = nv_ro32(bios, init->offset + 5);
1580 u32 data = nv_ro32(bios, init->offset + 9);
1581
1582 trace("NV_REG\tR[0x%06x] &= 0x%08x |= 0x%08x\n", reg, mask, data);
1583 init->offset += 13;
1584
1585 init_mask(init, reg, ~mask, data);
1586}
1587
1588/**
1589 * INIT_MACRO - opcode 0x6f
1590 *
1591 */
1592static void
1593init_macro(struct nvbios_init *init)
1594{
1595 struct nouveau_bios *bios = init->bios;
1596 u8 macro = nv_ro08(bios, init->offset + 1);
1597 u16 table;
1598
1599 trace("MACRO\t0x%02x\n", macro);
1600
1601 table = init_macro_table(init);
1602 if (table) {
1603 u32 addr = nv_ro32(bios, table + (macro * 8) + 0);
1604 u32 data = nv_ro32(bios, table + (macro * 8) + 4);
1605 trace("\t\tR[0x%06x] = 0x%08x\n", addr, data);
1606 init_wr32(init, addr, data);
1607 }
1608
1609 init->offset += 2;
1610}
1611
1612/**
1613 * INIT_RESUME - opcode 0x72
1614 *
1615 */
1616static void
1617init_resume(struct nvbios_init *init)
1618{
1619 trace("RESUME\n");
1620 init->offset += 1;
1621 init_exec_set(init, true);
1622}
1623
1624/**
1625 * INIT_TIME - opcode 0x74
1626 *
1627 */
1628static void
1629init_time(struct nvbios_init *init)
1630{
1631 struct nouveau_bios *bios = init->bios;
1632 u16 usec = nv_ro16(bios, init->offset + 1);
1633
1634 trace("TIME\t0x%04x\n", usec);
1635 init->offset += 3;
1636
1637 if (init_exec(init)) {
1638 if (usec < 1000)
1639 udelay(usec);
1640 else
1641 mdelay((usec + 900) / 1000);
1642 }
1643}
1644
1645/**
1646 * INIT_CONDITION - opcode 0x75
1647 *
1648 */
1649static void
1650init_condition(struct nvbios_init *init)
1651{
1652 struct nouveau_bios *bios = init->bios;
1653 u8 cond = nv_ro08(bios, init->offset + 1);
1654
1655 trace("CONDITION\t0x%02x\n", cond);
1656 init->offset += 2;
1657
1658 if (!init_condition_met(init, cond))
1659 init_exec_set(init, false);
1660}
1661
1662/**
1663 * INIT_IO_CONDITION - opcode 0x76
1664 *
1665 */
1666static void
1667init_io_condition(struct nvbios_init *init)
1668{
1669 struct nouveau_bios *bios = init->bios;
1670 u8 cond = nv_ro08(bios, init->offset + 1);
1671
1672 trace("IO_CONDITION\t0x%02x\n", cond);
1673 init->offset += 2;
1674
1675 if (!init_io_condition_met(init, cond))
1676 init_exec_set(init, false);
1677}
1678
1679/**
1680 * INIT_INDEX_IO - opcode 0x78
1681 *
1682 */
1683static void
1684init_index_io(struct nvbios_init *init)
1685{
1686 struct nouveau_bios *bios = init->bios;
1687 u16 port = nv_ro16(bios, init->offset + 1);
1688	u8 index = nv_ro08(bios, init->offset + 3);
1689 u8 mask = nv_ro08(bios, init->offset + 4);
1690 u8 data = nv_ro08(bios, init->offset + 5);
1691 u8 value;
1692
1693 trace("INDEX_IO\tI[0x%04x][0x%02x] &= 0x%02x |= 0x%02x\n",
1694 port, index, mask, data);
1695 init->offset += 6;
1696
1697 value = init_rdvgai(init, port, index) & mask;
1698 init_wrvgai(init, port, index, data | value);
1699}
1700
1701/**
1702 * INIT_PLL - opcode 0x79
1703 *
1704 */
1705static void
1706init_pll(struct nvbios_init *init)
1707{
1708 struct nouveau_bios *bios = init->bios;
1709 u32 reg = nv_ro32(bios, init->offset + 1);
1710 u32 freq = nv_ro16(bios, init->offset + 5) * 10;
1711
1712 trace("PLL\tR[0x%06x] =PLL= %dkHz\n", reg, freq);
1713 init->offset += 7;
1714
1715 init_prog_pll(init, reg, freq);
1716}
1717
1718/**
1719 * INIT_ZM_REG - opcode 0x7a
1720 *
1721 */
1722static void
1723init_zm_reg(struct nvbios_init *init)
1724{
1725 struct nouveau_bios *bios = init->bios;
1726 u32 addr = nv_ro32(bios, init->offset + 1);
1727 u32 data = nv_ro32(bios, init->offset + 5);
1728
1729 trace("ZM_REG\tR[0x%06x] = 0x%08x\n", addr, data);
1730 init->offset += 9;
1731
1732 if (addr == 0x000200)
1733 data |= 0x00000001;
1734
1735 init_wr32(init, addr, data);
1736}
1737
1738/**
1739 * INIT_RAM_RESTRICT_PLL - opcode 0x87
1740 *
1741 */
1742static void
1743init_ram_restrict_pll(struct nvbios_init *init)
1744{
1745 struct nouveau_bios *bios = init->bios;
1746 u8 type = nv_ro08(bios, init->offset + 1);
1747 u8 count = init_ram_restrict_group_count(init);
1748 u8 strap = init_ram_restrict(init);
1749 u8 cconf;
1750
1751 trace("RAM_RESTRICT_PLL\t0x%02x\n", type);
1752 init->offset += 2;
1753
1754 for (cconf = 0; cconf < count; cconf++) {
1755 u32 freq = nv_ro32(bios, init->offset);
1756
1757 if (cconf == strap) {
1758 trace("%dkHz *\n", freq);
1759 init_prog_pll(init, type, freq);
1760 } else {
1761 trace("%dkHz\n", freq);
1762 }
1763
1764 init->offset += 4;
1765 }
1766}
1767
1768/**
1769 * INIT_GPIO - opcode 0x8e
1770 *
1771 */
1772static void
1773init_gpio(struct nvbios_init *init)
1774{
1775 struct nouveau_gpio *gpio = nouveau_gpio(init->bios);
1776
1777 trace("GPIO\n");
1778 init->offset += 1;
1779
1780 if (init_exec(init) && gpio && gpio->reset)
1781 gpio->reset(gpio);
1782}
1783
1784/**
1785 * INIT_RAM_RESTRICT_ZM_REG_GROUP - opcode 0x8f
1786 *
1787 */
1788static void
1789init_ram_restrict_zm_reg_group(struct nvbios_init *init)
1790{
1791 struct nouveau_bios *bios = init->bios;
1792 u32 addr = nv_ro32(bios, init->offset + 1);
1793 u8 incr = nv_ro08(bios, init->offset + 5);
1794 u8 num = nv_ro08(bios, init->offset + 6);
1795 u8 count = init_ram_restrict_group_count(init);
1796 u8 index = init_ram_restrict(init);
1797 u8 i, j;
1798
1799 trace("RAM_RESTRICT_ZM_REG_GROUP\t"
1800	      "R[0x%08x] 0x%02x 0x%02x\n", addr, incr, num);
1801 init->offset += 7;
1802
1803 for (i = 0; i < num; i++) {
1804 trace("\tR[0x%06x] = {\n", addr);
1805 for (j = 0; j < count; j++) {
1806 u32 data = nv_ro32(bios, init->offset);
1807
1808 if (j == index) {
1809 trace("\t\t0x%08x *\n", data);
1810 init_wr32(init, addr, data);
1811 } else {
1812 trace("\t\t0x%08x\n", data);
1813 }
1814
1815 init->offset += 4;
1816 }
1817 trace("\t}\n");
1818 addr += incr;
1819 }
1820}
1821
1822/**
1823 * INIT_COPY_ZM_REG - opcode 0x90
1824 *
1825 */
1826static void
1827init_copy_zm_reg(struct nvbios_init *init)
1828{
1829 struct nouveau_bios *bios = init->bios;
1830 u32 sreg = nv_ro32(bios, init->offset + 1);
1831 u32 dreg = nv_ro32(bios, init->offset + 5);
1832
1833	trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dreg, sreg);
1834 init->offset += 9;
1835
1836 init_wr32(init, dreg, init_rd32(init, sreg));
1837}
1838
1839/**
1840 * INIT_ZM_REG_GROUP - opcode 0x91
1841 *
1842 */
1843static void
1844init_zm_reg_group(struct nvbios_init *init)
1845{
1846 struct nouveau_bios *bios = init->bios;
1847 u32 addr = nv_ro32(bios, init->offset + 1);
1848 u8 count = nv_ro08(bios, init->offset + 5);
1849
1850	trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr);
1851 init->offset += 6;
1852
1853 while (count--) {
1854 u32 data = nv_ro32(bios, init->offset);
1855 trace("\t0x%08x\n", data);
1856 init_wr32(init, addr, data);
1857 init->offset += 4;
1858 }
1859}
1860
1861/**
1862 * INIT_XLAT - opcode 0x96
1863 *
1864 */
1865static void
1866init_xlat(struct nvbios_init *init)
1867{
1868 struct nouveau_bios *bios = init->bios;
1869 u32 saddr = nv_ro32(bios, init->offset + 1);
1870 u8 sshift = nv_ro08(bios, init->offset + 5);
1871 u8 smask = nv_ro08(bios, init->offset + 6);
1872 u8 index = nv_ro08(bios, init->offset + 7);
1873 u32 daddr = nv_ro32(bios, init->offset + 8);
1874 u32 dmask = nv_ro32(bios, init->offset + 12);
1875 u8 shift = nv_ro08(bios, init->offset + 16);
1876 u32 data;
1877
1878 trace("INIT_XLAT\tR[0x%06x] &= 0x%08x |= "
1879 "(X%02x((R[0x%06x] %s 0x%02x) & 0x%02x) << 0x%02x)\n",
1880 daddr, dmask, index, saddr, (sshift & 0x80) ? "<<" : ">>",
1881 (sshift & 0x80) ? (0x100 - sshift) : sshift, smask, shift);
1882 init->offset += 17;
1883
1884 data = init_shift(init_rd32(init, saddr), sshift) & smask;
1885 data = init_xlat_(init, index, data) << shift;
1886 init_mask(init, daddr, ~dmask, data);
1887}
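
/* The INIT_XLAT stream decoded above is a fixed 17 bytes (read off the
 * offsets in init_xlat; illustrative only):
 *
 *   +0  u8   opcode (0x96)
 *   +1  u32  source register
 *   +5  u8   source shift (sign bit selects direction)
 *   +6  u8   source mask
 *   +7  u8   xlat table index
 *   +8  u32  destination register
 *   +12 u32  destination mask
 *   +16 u8   destination shift
 */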
1888
1889/**
1890 * INIT_ZM_MASK_ADD - opcode 0x97
1891 *
1892 */
1893static void
1894init_zm_mask_add(struct nvbios_init *init)
1895{
1896 struct nouveau_bios *bios = init->bios;
1897 u32 addr = nv_ro32(bios, init->offset + 1);
1898 u32 mask = nv_ro32(bios, init->offset + 5);
1899 u32 add = nv_ro32(bios, init->offset + 9);
1900 u32 data;
1901
1902 trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
1903 init->offset += 13;
1904
1905 data = init_rd32(init, addr) & mask;
1906 data |= ((data + add) & ~mask);
1907 init_wr32(init, addr, data);
1908}
1909
1910/**
1911 * INIT_AUXCH - opcode 0x98
1912 *
1913 */
1914static void
1915init_auxch(struct nvbios_init *init)
1916{
1917 struct nouveau_bios *bios = init->bios;
1918 u32 addr = nv_ro32(bios, init->offset + 1);
1919 u8 count = nv_ro08(bios, init->offset + 5);
1920
1921 trace("AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
1922 init->offset += 6;
1923
1924 while (count--) {
1925 u8 mask = nv_ro08(bios, init->offset + 0);
1926 u8 data = nv_ro08(bios, init->offset + 1);
1927 trace("\tAUX[0x%08x] &= 0x%02x |= 0x%02x\n", addr, mask, data);
1928 mask = init_rdauxr(init, addr) & mask;
1929 init_wrauxr(init, addr, mask | data);
1930 init->offset += 2;
1931 }
1932}
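
/* INIT_AUXCH, as parsed above, is 6 + 2 * count bytes: a 32-bit aux
 * address and an entry count, followed by (mask, data) byte pairs that
 * are applied as read-modify-write aux transactions.
 */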
1933
1934/**
1935 * INIT_ZM_AUXCH - opcode 0x99
1936 *
1937 */
1938static void
1939init_zm_auxch(struct nvbios_init *init)
1940{
1941 struct nouveau_bios *bios = init->bios;
1942 u32 addr = nv_ro32(bios, init->offset + 1);
1943 u8 count = nv_ro08(bios, init->offset + 5);
1944
1945 trace("ZM_AUXCH\tAUX[0x%08x] 0x%02x\n", addr, count);
1946 init->offset += 6;
1947
1948 while (count--) {
1949 u8 data = nv_ro08(bios, init->offset + 0);
1950 trace("\tAUX[0x%08x] = 0x%02x\n", addr, data);
1951 init_wrauxr(init, addr, data);
1952 init->offset += 1;
1953 }
1954}
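
/* INIT_ZM_AUXCH differs from INIT_AUXCH only in writing each of the
 * "count" payload bytes directly, with no read-modify-write step,
 * giving a 6 + count byte stream.
 */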
1955
1956/**
1957 * INIT_I2C_LONG_IF - opcode 0x9a
1958 *
1959 */
1960static void
1961init_i2c_long_if(struct nvbios_init *init)
1962{
1963 struct nouveau_bios *bios = init->bios;
1964 u8 index = nv_ro08(bios, init->offset + 1);
1965 u8 addr = nv_ro08(bios, init->offset + 2) >> 1;
1966 u8 reglo = nv_ro08(bios, init->offset + 3);
1967 u8 reghi = nv_ro08(bios, init->offset + 4);
1968 u8 mask = nv_ro08(bios, init->offset + 5);
1969 u8 data = nv_ro08(bios, init->offset + 6);
1970 struct nouveau_i2c_port *port;
1971
1972 trace("I2C_LONG_IF\t"
1973 "I2C[0x%02x][0x%02x][0x%02x%02x] & 0x%02x == 0x%02x\n",
1974 index, addr, reglo, reghi, mask, data);
1975 init->offset += 7;
1976
1977 port = init_i2c(init, index);
1978 if (port) {
1979 u8 i[2] = { reghi, reglo };
1980 u8 o[1] = {};
1981 struct i2c_msg msg[] = {
1982 { .addr = addr, .flags = 0, .len = 2, .buf = i },
1983 { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = o }
1984 };
1985 int ret;
1986
1987 ret = i2c_transfer(&port->adapter, msg, 2);
1988 if (ret == 2 && ((o[0] & mask) == data))
1989 return;
1990 }
1991
1992 init_exec_set(init, false);
1993}
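
/* INIT_I2C_LONG_IF is a 7-byte conditional: it reads one byte from a
 * 16-bit (reghi:reglo) register address over i2c and, unless the masked
 * value matches, disables execution of the following opcodes via
 * init_exec_set(init, false) -- the same convention the other *_IF
 * opcodes use.
 */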
1994
1995static struct nvbios_init_opcode {
1996 void (*exec)(struct nvbios_init *);
1997} init_opcode[] = {
1998 [0x32] = { init_io_restrict_prog },
1999 [0x33] = { init_repeat },
2000 [0x34] = { init_io_restrict_pll },
2001 [0x36] = { init_end_repeat },
2002 [0x37] = { init_copy },
2003 [0x38] = { init_not },
2004 [0x39] = { init_io_flag_condition },
2005 [0x3a] = { init_dp_condition },
2006 [0x3b] = { init_io_mask_or },
2007 [0x3c] = { init_io_or },
2008 [0x49] = { init_idx_addr_latched },
2009 [0x4a] = { init_io_restrict_pll2 },
2010 [0x4b] = { init_pll2 },
2011 [0x4c] = { init_i2c_byte },
2012 [0x4d] = { init_zm_i2c_byte },
2013 [0x4e] = { init_zm_i2c },
2014 [0x4f] = { init_tmds },
2015 [0x50] = { init_zm_tmds_group },
2016 [0x51] = { init_cr_idx_adr_latch },
2017 [0x52] = { init_cr },
2018 [0x53] = { init_zm_cr },
2019 [0x54] = { init_zm_cr_group },
2020 [0x56] = { init_condition_time },
2021 [0x57] = { init_ltime },
2022 [0x58] = { init_zm_reg_sequence },
2023 [0x5b] = { init_sub_direct },
2024 [0x5c] = { init_jump },
2025 [0x5e] = { init_i2c_if },
2026 [0x5f] = { init_copy_nv_reg },
2027 [0x62] = { init_zm_index_io },
2028 [0x63] = { init_compute_mem },
2029 [0x65] = { init_reset },
2030 [0x66] = { init_configure_mem },
2031 [0x67] = { init_configure_clk },
2032 [0x68] = { init_configure_preinit },
2033 [0x69] = { init_io },
2034 [0x6b] = { init_sub },
2035 [0x6d] = { init_ram_condition },
2036 [0x6e] = { init_nv_reg },
2037 [0x6f] = { init_macro },
2038 [0x71] = { init_done },
2039 [0x72] = { init_resume },
2040 [0x74] = { init_time },
2041 [0x75] = { init_condition },
2042 [0x76] = { init_io_condition },
2043 [0x78] = { init_index_io },
2044 [0x79] = { init_pll },
2045 [0x7a] = { init_zm_reg },
2046 [0x87] = { init_ram_restrict_pll },
2047 [0x8c] = { init_reserved },
2048 [0x8d] = { init_reserved },
2049 [0x8e] = { init_gpio },
2050 [0x8f] = { init_ram_restrict_zm_reg_group },
2051 [0x90] = { init_copy_zm_reg },
2052 [0x91] = { init_zm_reg_group },
2053 [0x92] = { init_reserved },
2054 [0x96] = { init_xlat },
2055 [0x97] = { init_zm_mask_add },
2056 [0x98] = { init_auxch },
2057 [0x99] = { init_zm_auxch },
2058 [0x9a] = { init_i2c_long_if },
2059};
2060
2061#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
2062
2063int
2064nvbios_exec(struct nvbios_init *init)
2065{
2066 init->nested++;
2067 while (init->offset) {
2068 u8 opcode = nv_ro08(init->bios, init->offset);
2069 if (opcode >= init_opcode_nr || !init_opcode[opcode].exec) {
2070 error("unknown opcode 0x%02x\n", opcode);
			init->nested--;
2071			return -EINVAL;
2072 }
2073
2074 init_opcode[opcode].exec(init);
2075 }
2076 init->nested--;
2077 return 0;
2078}
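
/* Note on termination: the loop above runs until init->offset becomes
 * zero, which the flow-control opcodes (INIT_DONE, 0x71, in particular)
 * are expected to arrange; a script lacking such a terminator would
 * never return from here.
 */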
2079
2080int
2081nvbios_init(struct nouveau_subdev *subdev, bool execute)
2082{
2083 struct nouveau_bios *bios = nouveau_bios(subdev);
2084 int ret = 0;
2085 int i = -1;
2086 u16 data;
2087
2088 if (execute)
2089 nv_info(bios, "running init tables\n");
2090 while (!ret && (data = (init_script(bios, ++i)))) {
2091 struct nvbios_init init = {
2092 .subdev = subdev,
2093 .bios = bios,
2094 .offset = data,
2095 .outp = NULL,
2096 .crtc = -1,
2097 .execute = execute ? 1 : 0,
2098 };
2099
2100 ret = nvbios_exec(&init);
2101 }
2102
2103 /* the vbios parser will run this right after the normal init
2104 * tables, whereas the binary driver appears to run it later.
2105 */
2106 if (!ret && (data = init_unknown_script(bios))) {
2107 struct nvbios_init init = {
2108 .subdev = subdev,
2109 .bios = bios,
2110 .offset = data,
2111 .outp = NULL,
2112 .crtc = -1,
2113 .execute = execute ? 1 : 0,
2114 };
2115
2116 ret = nvbios_exec(&init);
2117 }
2118
2119	return ret;
2120}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c
new file mode 100644
index 000000000000..2610b11a99b3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/mxm.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/mxm.h>
28
29u16
30mxm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr)
31{
32 struct bit_entry x;
33
34 if (bit_entry(bios, 'x', &x)) {
35 nv_debug(bios, "BIT 'x' table not present\n");
36 return 0x0000;
37 }
38
39 *ver = x.version;
40 *hdr = x.length;
41 if (*ver != 1 || *hdr < 3) {
42 nv_warn(bios, "BIT 'x' table %d/%d unknown\n", *ver, *hdr);
43 return 0x0000;
44 }
45
46 return x.offset;
47}
48
49/* These map MXM v2.x digital connection values to the appropriate SOR/link;
50 * hopefully they're correct for all boards within the same chipset...
51 *
52 * MXM v3.x VBIOS are nicer and provide pointers to these tables.
53 */
54static u8 nv84_sor_map[16] = {
55 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
56 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
57};
58
59static u8 nv92_sor_map[16] = {
60 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
61 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
62};
63
64static u8 nv94_sor_map[16] = {
65 0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
66 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
67};
68
69static u8 nv98_sor_map[16] = {
70 0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
71 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
72};
73
74u8
75mxm_sor_map(struct nouveau_bios *bios, u8 conn)
76{
77 u8 ver, hdr;
78 u16 mxm = mxm_table(bios, &ver, &hdr);
79 if (mxm && hdr >= 6) {
80 u16 map = nv_ro16(bios, mxm + 4);
81 if (map) {
82 ver = nv_ro08(bios, map);
83 if (ver == 0x10) {
84 if (conn < nv_ro08(bios, map + 3)) {
85 map += nv_ro08(bios, map + 1);
86 map += conn;
87 return nv_ro08(bios, map);
88 }
89
90 return 0x00;
91 }
92
93 nv_warn(bios, "unknown sor map v%02x\n", ver);
94 }
95 }
96
97 if (bios->version.chip == 0x84 || bios->version.chip == 0x86)
98 return nv84_sor_map[conn];
99 if (bios->version.chip == 0x92)
100 return nv92_sor_map[conn];
101 if (bios->version.chip == 0x94 || bios->version.chip == 0x96)
102 return nv94_sor_map[conn];
103 if (bios->version.chip == 0x98)
104 return nv98_sor_map[conn];
105
106 nv_warn(bios, "missing sor map\n");
107 return 0x00;
108}
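
/* Usage sketch for the v3.x path above (illustrative values, not from
 * a real VBIOS): given a v1.0 sor map whose header length is 4 and
 * which holds 8 entries, mxm_sor_map(bios, 2) returns the byte at
 * map + 4 + 2, while any conn index beyond the entry count yields 0x00.
 */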
109
110u8
111mxm_ddc_map(struct nouveau_bios *bios, u8 port)
112{
113 u8 ver, hdr;
114 u16 mxm = mxm_table(bios, &ver, &hdr);
115 if (mxm && hdr >= 8) {
116 u16 map = nv_ro16(bios, mxm + 6);
117 if (map) {
118 ver = nv_ro08(bios, map);
119 if (ver == 0x10) {
120 if (port < nv_ro08(bios, map + 3)) {
121 map += nv_ro08(bios, map + 1);
122 map += port;
123 return nv_ro08(bios, map);
124 }
125
126 return 0x00;
127 }
128
129 nv_warn(bios, "unknown ddc map v%02x\n", ver);
130 }
131 }
132
133	/* v2.x: use the port number directly as the dcb i2c index, e.g. port 2 becomes 0x22 */
134 return (port << 4) | port;
135}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
new file mode 100644
index 000000000000..bcbb056c2887
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/perf.c
@@ -0,0 +1,75 @@
1/*
2 * Copyright 2012 Nouveau Community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/perf.h>
28
29static u16
30perf_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
31{
32 struct bit_entry bit_P;
33 u16 perf = 0x0000;
34
35 if (!bit_entry(bios, 'P', &bit_P)) {
36 if (bit_P.version <= 2) {
37 perf = nv_ro16(bios, bit_P.offset + 0);
38 if (perf) {
39 *ver = nv_ro08(bios, perf + 0);
40 *hdr = nv_ro08(bios, perf + 1);
41 }
42 } else
43 nv_error(bios, "unknown offset for perf in BIT P %d\n",
44 bit_P.version);
45 }
46
47 if (bios->bmp_offset) {
48 if (nv_ro08(bios, bios->bmp_offset + 6) >= 0x25) {
49 perf = nv_ro16(bios, bios->bmp_offset + 0x94);
50 if (perf) {
51 *hdr = nv_ro08(bios, perf + 0);
52 *ver = nv_ro08(bios, perf + 1);
53 }
54 }
55 }
56
57 return perf;
58}
59
60int
61nvbios_perf_fan_parse(struct nouveau_bios *bios,
62 struct nvbios_perf_fan *fan)
63{
64 u8 ver = 0, hdr = 0, cnt = 0, len = 0;
65 u16 perf = perf_table(bios, &ver, &hdr, &cnt, &len);
66 if (!perf)
67 return -ENODEV;
68
69 if (ver >= 0x20 && ver < 0x40 && hdr > 6)
70 fan->pwm_divisor = nv_ro16(bios, perf + 6);
71 else
72 fan->pwm_divisor = 0;
73
74 return 0;
75}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
new file mode 100644
index 000000000000..5e5f4cddae3c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pll.c
@@ -0,0 +1,417 @@
1/*
2 * Copyright 2005-2006 Erik Waling
3 * Copyright 2006 Stephane Marchesin
4 * Copyright 2007-2009 Stuart Bennett
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
21 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25#include <subdev/vga.h>
26#include <subdev/bios.h>
27#include <subdev/bios/bit.h>
28#include <subdev/bios/bmp.h>
29#include <subdev/bios/pll.h>
30
31struct pll_mapping {
32 u8 type;
33 u32 reg;
34};
35
36static struct pll_mapping
37nv04_pll_mapping[] = {
38 { PLL_CORE , 0x680500 },
39 { PLL_MEMORY, 0x680504 },
40 { PLL_VPLL0 , 0x680508 },
41 { PLL_VPLL1 , 0x680520 },
42 {}
43};
44
45static struct pll_mapping
46nv40_pll_mapping[] = {
47 { PLL_CORE , 0x004000 },
48 { PLL_MEMORY, 0x004020 },
49 { PLL_VPLL0 , 0x680508 },
50 { PLL_VPLL1 , 0x680520 },
51 {}
52};
53
54static struct pll_mapping
55nv50_pll_mapping[] = {
56 { PLL_CORE , 0x004028 },
57 { PLL_SHADER, 0x004020 },
58 { PLL_UNK03 , 0x004000 },
59 { PLL_MEMORY, 0x004008 },
60 { PLL_UNK40 , 0x00e810 },
61 { PLL_UNK41 , 0x00e818 },
62 { PLL_UNK42 , 0x00e824 },
63 { PLL_VPLL0 , 0x614100 },
64 { PLL_VPLL1 , 0x614900 },
65 {}
66};
67
68static struct pll_mapping
69nv84_pll_mapping[] = {
70 { PLL_CORE , 0x004028 },
71 { PLL_SHADER, 0x004020 },
72 { PLL_MEMORY, 0x004008 },
73 { PLL_VDEC , 0x004030 },
74 { PLL_UNK41 , 0x00e818 },
75 { PLL_VPLL0 , 0x614100 },
76 { PLL_VPLL1 , 0x614900 },
77 {}
78};
79
80static u16
81pll_limits_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
82{
83 struct bit_entry bit_C;
84
85 if (!bit_entry(bios, 'C', &bit_C) && bit_C.length >= 10) {
86 u16 data = nv_ro16(bios, bit_C.offset + 8);
87 if (data) {
88 *ver = nv_ro08(bios, data + 0);
89 *hdr = nv_ro08(bios, data + 1);
90 *len = nv_ro08(bios, data + 2);
91 *cnt = nv_ro08(bios, data + 3);
92 return data;
93 }
94 }
95
96 if (bmp_version(bios) >= 0x0524) {
97 u16 data = nv_ro16(bios, bios->bmp_offset + 142);
98 if (data) {
99 *ver = nv_ro08(bios, data + 0);
100 *hdr = 1;
101 *cnt = 1;
102 *len = 0x18;
103 return data;
104 }
105 }
106
107 *ver = 0x00;
108 return 0x0000;
109}
110
111static struct pll_mapping *
112pll_map(struct nouveau_bios *bios)
113{
114 switch (nv_device(bios)->card_type) {
115 case NV_04:
116 case NV_10:
117 case NV_20:
118 case NV_30:
119		return nv04_pll_mapping;
121 case NV_40:
122 return nv40_pll_mapping;
123 case NV_50:
124 if (nv_device(bios)->chipset == 0x50)
125 return nv50_pll_mapping;
126 else
127 if (nv_device(bios)->chipset < 0xa3 ||
128 nv_device(bios)->chipset == 0xaa ||
129 nv_device(bios)->chipset == 0xac)
130 return nv84_pll_mapping;
131 default:
132 return NULL;
133 }
134}
135
136static u16
137pll_map_reg(struct nouveau_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
138{
139 struct pll_mapping *map;
140 u8 hdr, cnt;
141 u16 data;
142
143 data = pll_limits_table(bios, ver, &hdr, &cnt, len);
144 if (data && *ver >= 0x30) {
145 data += hdr;
146 while (cnt--) {
147 if (nv_ro32(bios, data + 3) == reg) {
148 *type = nv_ro08(bios, data + 0);
149 return data;
150 }
151 data += *len;
152 }
153 return 0x0000;
154 }
155
156 map = pll_map(bios);
157	while (map && map->reg) {
158 if (map->reg == reg && *ver >= 0x20) {
159 u16 addr = (data += hdr);
160 while (cnt--) {
161 if (nv_ro32(bios, data) == map->reg) {
162 *type = map->type;
163 return data;
164 }
165 data += *len;
166 }
167 return addr;
168 } else
169 if (map->reg == reg) {
170 *type = map->type;
171 return data + 1;
172 }
173 map++;
174 }
175
176 return 0x0000;
177}
178
179static u16
180pll_map_type(struct nouveau_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
181{
182 struct pll_mapping *map;
183 u8 hdr, cnt;
184 u16 data;
185
186 data = pll_limits_table(bios, ver, &hdr, &cnt, len);
187 if (data && *ver >= 0x30) {
188 data += hdr;
189 while (cnt--) {
190 if (nv_ro08(bios, data + 0) == type) {
191 *reg = nv_ro32(bios, data + 3);
192 return data;
193 }
194 data += *len;
195 }
196 return 0x0000;
197 }
198
199 map = pll_map(bios);
200	while (map && map->reg) {
201 if (map->type == type && *ver >= 0x20) {
202 u16 addr = (data += hdr);
203 while (cnt--) {
204 if (nv_ro32(bios, data) == map->reg) {
205 *reg = map->reg;
206 return data;
207 }
208 data += *len;
209 }
210 return addr;
211 } else
212 if (map->type == type) {
213 *reg = map->reg;
214 return data + 1;
215 }
216 map++;
217 }
218
219 return 0x0000;
220}
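
/* Both lookup helpers above follow the same three-tier strategy: v3.0+
 * limits tables are searched directly by type or register; v2.x tables
 * are searched by register after the fixed pll_map() has translated
 * between type and register; and pre-v2.0 or table-less BIOSes fall
 * back to the fixed per-chipset mapping alone.
 */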
221
222int
223nvbios_pll_parse(struct nouveau_bios *bios, u32 type, struct nvbios_pll *info)
224{
225 u8 ver, len;
226 u32 reg = type;
227 u16 data;
228
229 if (type > PLL_MAX) {
230 reg = type;
231 data = pll_map_reg(bios, reg, &type, &ver, &len);
232 } else {
233 data = pll_map_type(bios, type, &reg, &ver, &len);
234 }
235
236 if (ver && !data)
237 return -ENOENT;
238
239 memset(info, 0, sizeof(*info));
240 info->type = type;
241 info->reg = reg;
242
243 switch (ver) {
244 case 0x00:
245 break;
246 case 0x10:
247 case 0x11:
248 info->vco1.min_freq = nv_ro32(bios, data + 0);
249 info->vco1.max_freq = nv_ro32(bios, data + 4);
250 info->vco2.min_freq = nv_ro32(bios, data + 8);
251 info->vco2.max_freq = nv_ro32(bios, data + 12);
252 info->vco1.min_inputfreq = nv_ro32(bios, data + 16);
253 info->vco2.min_inputfreq = nv_ro32(bios, data + 20);
254 info->vco1.max_inputfreq = INT_MAX;
255 info->vco2.max_inputfreq = INT_MAX;
256
257 info->max_p = 0x7;
258 info->max_p_usable = 0x6;
259
260 /* these values taken from nv30/31/36 */
261 switch (bios->version.chip) {
262 case 0x36:
263 info->vco1.min_n = 0x5;
264 break;
265 default:
266 info->vco1.min_n = 0x1;
267 break;
268 }
269 info->vco1.max_n = 0xff;
270 info->vco1.min_m = 0x1;
271 info->vco1.max_m = 0xd;
272
273		/*
274		 * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this
275		 * table version (apart from nv35)), N2 is compared against both
276		 * maxN2 (0x46) and 10 * maxM2 (10 * 0x4 = 0x28); 0x28 is the
277		 * tighter bound, so setting maxN2 to 0x28 saves a comparison.
278		 */
279 info->vco2.min_n = 0x4;
280 switch (bios->version.chip) {
281 case 0x30:
282 case 0x35:
283 info->vco2.max_n = 0x1f;
284 break;
285 default:
286 info->vco2.max_n = 0x28;
287 break;
288 }
289 info->vco2.min_m = 0x1;
290 info->vco2.max_m = 0x4;
291 break;
292 case 0x20:
293 case 0x21:
294 info->vco1.min_freq = nv_ro16(bios, data + 4) * 1000;
295 info->vco1.max_freq = nv_ro16(bios, data + 6) * 1000;
296 info->vco2.min_freq = nv_ro16(bios, data + 8) * 1000;
297 info->vco2.max_freq = nv_ro16(bios, data + 10) * 1000;
298 info->vco1.min_inputfreq = nv_ro16(bios, data + 12) * 1000;
299 info->vco2.min_inputfreq = nv_ro16(bios, data + 14) * 1000;
300 info->vco1.max_inputfreq = nv_ro16(bios, data + 16) * 1000;
301 info->vco2.max_inputfreq = nv_ro16(bios, data + 18) * 1000;
302 info->vco1.min_n = nv_ro08(bios, data + 20);
303 info->vco1.max_n = nv_ro08(bios, data + 21);
304 info->vco1.min_m = nv_ro08(bios, data + 22);
305 info->vco1.max_m = nv_ro08(bios, data + 23);
306 info->vco2.min_n = nv_ro08(bios, data + 24);
307 info->vco2.max_n = nv_ro08(bios, data + 25);
308 info->vco2.min_m = nv_ro08(bios, data + 26);
309 info->vco2.max_m = nv_ro08(bios, data + 27);
310
311 info->max_p = nv_ro08(bios, data + 29);
312 info->max_p_usable = info->max_p;
313 if (bios->version.chip < 0x60)
314 info->max_p_usable = 0x6;
315 info->bias_p = nv_ro08(bios, data + 30);
316
317 if (len > 0x22)
318 info->refclk = nv_ro32(bios, data + 31);
319 break;
320 case 0x30:
321 data = nv_ro16(bios, data + 1);
322
323 info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
324 info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
325 info->vco2.min_freq = nv_ro16(bios, data + 4) * 1000;
326 info->vco2.max_freq = nv_ro16(bios, data + 6) * 1000;
327 info->vco1.min_inputfreq = nv_ro16(bios, data + 8) * 1000;
328 info->vco2.min_inputfreq = nv_ro16(bios, data + 10) * 1000;
329 info->vco1.max_inputfreq = nv_ro16(bios, data + 12) * 1000;
330 info->vco2.max_inputfreq = nv_ro16(bios, data + 14) * 1000;
331 info->vco1.min_n = nv_ro08(bios, data + 16);
332 info->vco1.max_n = nv_ro08(bios, data + 17);
333 info->vco1.min_m = nv_ro08(bios, data + 18);
334 info->vco1.max_m = nv_ro08(bios, data + 19);
335 info->vco2.min_n = nv_ro08(bios, data + 20);
336 info->vco2.max_n = nv_ro08(bios, data + 21);
337 info->vco2.min_m = nv_ro08(bios, data + 22);
338 info->vco2.max_m = nv_ro08(bios, data + 23);
339 info->max_p_usable = info->max_p = nv_ro08(bios, data + 25);
340 info->bias_p = nv_ro08(bios, data + 27);
341 info->refclk = nv_ro32(bios, data + 28);
342 break;
343 case 0x40:
344 info->refclk = nv_ro16(bios, data + 9) * 1000;
345 data = nv_ro16(bios, data + 1);
346
347 info->vco1.min_freq = nv_ro16(bios, data + 0) * 1000;
348 info->vco1.max_freq = nv_ro16(bios, data + 2) * 1000;
349 info->vco1.min_inputfreq = nv_ro16(bios, data + 4) * 1000;
350 info->vco1.max_inputfreq = nv_ro16(bios, data + 6) * 1000;
351 info->vco1.min_m = nv_ro08(bios, data + 8);
352 info->vco1.max_m = nv_ro08(bios, data + 9);
353 info->vco1.min_n = nv_ro08(bios, data + 10);
354 info->vco1.max_n = nv_ro08(bios, data + 11);
355 info->min_p = nv_ro08(bios, data + 12);
356 info->max_p = nv_ro08(bios, data + 13);
357 break;
358 default:
359 nv_error(bios, "unknown pll limits version 0x%02x\n", ver);
360 return -EINVAL;
361 }
362
363 if (!info->refclk) {
364 info->refclk = nv_device(bios)->crystal;
365 if (bios->version.chip == 0x51) {
366 u32 sel_clk = nv_rd32(bios, 0x680524);
367 if ((info->reg == 0x680508 && sel_clk & 0x20) ||
368 (info->reg == 0x680520 && sel_clk & 0x80)) {
369 if (nv_rdvgac(bios, 0, 0x27) < 0xa3)
370 info->refclk = 200000;
371 else
372 info->refclk = 25000;
373 }
374 }
375 }
376
377 /*
378 * By now any valid limit table ought to have set a max frequency for
379 * vco1, so if it's zero it's either a pre limit table bios, or one
380 * with an empty limit table (seen on nv18)
381 */
382 if (!info->vco1.max_freq) {
383 info->vco1.max_freq = nv_ro32(bios, bios->bmp_offset + 67);
384 info->vco1.min_freq = nv_ro32(bios, bios->bmp_offset + 71);
385 if (bmp_version(bios) < 0x0506) {
386 info->vco1.max_freq = 256000;
387 info->vco1.min_freq = 128000;
388 }
389
390 info->vco1.min_inputfreq = 0;
391 info->vco1.max_inputfreq = INT_MAX;
392 info->vco1.min_n = 0x1;
393 info->vco1.max_n = 0xff;
394 info->vco1.min_m = 0x1;
395
396 if (nv_device(bios)->crystal == 13500) {
397 /* nv05 does this, nv11 doesn't, nv10 unknown */
398 if (bios->version.chip < 0x11)
399 info->vco1.min_m = 0x7;
400 info->vco1.max_m = 0xd;
401 } else {
402 if (bios->version.chip < 0x11)
403 info->vco1.min_m = 0x8;
404 info->vco1.max_m = 0xe;
405 }
406
407 if (bios->version.chip < 0x17 ||
408 bios->version.chip == 0x1a ||
409 bios->version.chip == 0x20)
410 info->max_p = 4;
411 else
412 info->max_p = 5;
413 info->max_p_usable = info->max_p;
414 }
415
416 return 0;
417}
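
/* Minimal usage sketch (assumes a valid nouveau_bios pointer; PLL_CORE
 * is one of the types used in the mappings above):
 *
 *	struct nvbios_pll info;
 *	if (!nvbios_pll_parse(bios, PLL_CORE, &info))
 *		nv_debug(bios, "core pll: vco1 %d-%dkHz refclk %dkHz\n",
 *			 info.vco1.min_freq, info.vco1.max_freq,
 *			 info.refclk);
 */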
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
new file mode 100644
index 000000000000..862a08a2ae27
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
@@ -0,0 +1,177 @@
1/*
2 * Copyright 2012 Nouveau Community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/therm.h>
28
29static u16
30therm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
31{
32 struct bit_entry bit_P;
33 u16 therm = 0;
34
35 if (!bit_entry(bios, 'P', &bit_P)) {
36 if (bit_P.version == 1)
37 therm = nv_ro16(bios, bit_P.offset + 12);
38 else if (bit_P.version == 2)
39 therm = nv_ro16(bios, bit_P.offset + 16);
40 else
41 nv_error(bios,
42 "unknown offset for thermal in BIT P %d\n",
43 bit_P.version);
44 }
45
46 /* exit now if we haven't found the thermal table */
47 if (!therm)
48 return 0x0000;
49
50 *ver = nv_ro08(bios, therm + 0);
51 *hdr = nv_ro08(bios, therm + 1);
52 *len = nv_ro08(bios, therm + 2);
53 *cnt = nv_ro08(bios, therm + 3);
54
55 return therm + nv_ro08(bios, therm + 1);
56}
57
58u16
59nvbios_therm_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
60{
61 u8 hdr, cnt;
62 u16 therm = therm_table(bios, ver, &hdr, len, &cnt);
63 if (therm && idx < cnt)
64 return therm + idx * *len;
65 return 0x0000;
66}
67
68int
69nvbios_therm_sensor_parse(struct nouveau_bios *bios,
70 enum nvbios_therm_domain domain,
71 struct nvbios_therm_sensor *sensor)
72{
73 s8 thrs_section, sensor_section, offset;
74 u8 ver, len, i;
75 u16 entry;
76
77 /* we only support the core domain for now */
78 if (domain != NVBIOS_THERM_DOMAIN_CORE)
79 return -EINVAL;
80
81 /* Read the entries from the table */
82 thrs_section = 0;
83 sensor_section = -1;
84 i = 0;
85 while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
86 s16 value = nv_ro16(bios, entry + 1);
87
88 switch (nv_ro08(bios, entry + 0)) {
89 case 0x0:
90 thrs_section = value;
91 if (value > 0)
92 return 0; /* we do not try to support ambient */
93 break;
94 case 0x01:
95 sensor_section++;
96 if (sensor_section == 0) {
97 offset = ((s8) nv_ro08(bios, entry + 2)) / 2;
98 sensor->offset_constant = offset;
99 }
100 break;
101
102 case 0x04:
103 if (thrs_section == 0) {
104 sensor->thrs_critical.temp = (value & 0xff0) >> 4;
105 sensor->thrs_critical.hysteresis = value & 0xf;
106 }
107 break;
108
109 case 0x07:
110 if (thrs_section == 0) {
111 sensor->thrs_down_clock.temp = (value & 0xff0) >> 4;
112 sensor->thrs_down_clock.hysteresis = value & 0xf;
113 }
114 break;
115
116 case 0x08:
117 if (thrs_section == 0) {
118 sensor->thrs_fan_boost.temp = (value & 0xff0) >> 4;
119 sensor->thrs_fan_boost.hysteresis = value & 0xf;
120 }
121 break;
122
123 case 0x10:
124 if (sensor_section == 0)
125 sensor->offset_num = value;
126 break;
127
128 case 0x11:
129 if (sensor_section == 0)
130 sensor->offset_den = value;
131 break;
132
133 case 0x12:
134 if (sensor_section == 0)
135 sensor->slope_mult = value;
136 break;
137
138 case 0x13:
139 if (sensor_section == 0)
140 sensor->slope_div = value;
141 break;
142 case 0x32:
143 if (thrs_section == 0) {
144 sensor->thrs_shutdown.temp = (value & 0xff0) >> 4;
145 sensor->thrs_shutdown.hysteresis = value & 0xf;
146 }
147 break;
148 }
149 }
150
151 return 0;
152}
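
/* The threshold entries above pack temperature and hysteresis into one
 * 16-bit value: bits 11:4 carry the temperature in degrees C and bits
 * 3:0 the hysteresis.  As an illustration, a value of 0x3c2 decodes to
 * 60 degrees C with 2 degrees of hysteresis.
 */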
153
154int
155nvbios_therm_fan_parse(struct nouveau_bios *bios,
156 struct nvbios_therm_fan *fan)
157{
158 u8 ver, len, i;
159 u16 entry;
160
161 i = 0;
162 while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
163 s16 value = nv_ro16(bios, entry + 1);
164
165 switch (nv_ro08(bios, entry + 0)) {
166 case 0x22:
167 fan->min_duty = value & 0xff;
168 fan->max_duty = (value & 0xff00) >> 8;
169 break;
170 case 0x26:
171 fan->pwm_freq = value;
172 break;
173 }
174 }
175
176 return 0;
177}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
new file mode 100644
index 000000000000..b7fd1151166e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -0,0 +1,359 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/clock.h>
26#include <subdev/bios.h>
27#include <subdev/bios/pll.h>
28
29#include "pll.h"
30
31struct nv04_clock_priv {
32 struct nouveau_clock base;
33};
34
35static int
36powerctrl_1_shift(int chip_version, int reg)
37{
38 int shift = -4;
39
40 if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
41 return shift;
42
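	/* intentional fall-through: each matching case and every case
	 * below it adds 4, giving shifts of 12/8/4/0 for registers
	 * 0x680520/0x680508/0x680504/0x680500 respectively */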
43 switch (reg) {
44 case 0x680520:
45 shift += 4;
46 case 0x680508:
47 shift += 4;
48 case 0x680504:
49 shift += 4;
50 case 0x680500:
51 shift += 4;
52 }
53
54 /*
55 * the shift for vpll regs is only used for nv3x chips with a single
56 * stage pll
57 */
58 if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
59 chip_version == 0x36 || chip_version >= 0x40))
60 shift = -4;
61
62 return shift;
63}
64
65static void
66setPLL_single(struct nv04_clock_priv *priv, u32 reg,
67 struct nouveau_pll_vals *pv)
68{
69 int chip_version = nouveau_bios(priv)->version.chip;
70 uint32_t oldpll = nv_rd32(priv, reg);
71 int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
72 uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
73 uint32_t saved_powerctrl_1 = 0;
74 int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);
75
76 if (oldpll == pll)
77 return; /* already set */
78
79 if (shift_powerctrl_1 >= 0) {
80 saved_powerctrl_1 = nv_rd32(priv, 0x001584);
81 nv_wr32(priv, 0x001584,
82 (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
83 1 << shift_powerctrl_1);
84 }
85
86 if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
87 /* upclock -- write new post divider first */
88 nv_wr32(priv, reg, pv->log2P << 16 | (oldpll & 0xffff));
89 else
90 /* downclock -- write new NM first */
91 nv_wr32(priv, reg, (oldpll & 0xffff0000) | pv->NM1);
92
93 if (chip_version < 0x17 && chip_version != 0x11)
94 /* wait a bit on older chips */
95 msleep(64);
96 nv_rd32(priv, reg);
97
98 /* then write the other half as well */
99 nv_wr32(priv, reg, pll);
100
101 if (shift_powerctrl_1 >= 0)
102 nv_wr32(priv, 0x001584, saved_powerctrl_1);
103}
104
105static uint32_t
106new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
107{
108 bool head_a = (reg1 == 0x680508);
109
110 if (ss) /* single stage pll mode */
111 ramdac580 |= head_a ? 0x00000100 : 0x10000000;
112 else
113 ramdac580 &= head_a ? 0xfffffeff : 0xefffffff;
114
115 return ramdac580;
116}
117
118static void
119setPLL_double_highregs(struct nv04_clock_priv *priv, u32 reg1,
120 struct nouveau_pll_vals *pv)
121{
122 int chip_version = nouveau_bios(priv)->version.chip;
123 bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
124 uint32_t reg2 = reg1 + ((reg1 == 0x680520) ? 0x5c : 0x70);
125 uint32_t oldpll1 = nv_rd32(priv, reg1);
126 uint32_t oldpll2 = !nv3035 ? nv_rd32(priv, reg2) : 0;
127 uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
128 uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
129 uint32_t oldramdac580 = 0, ramdac580 = 0;
130 bool single_stage = !pv->NM2 || pv->N2 == pv->M2; /* nv41+ only */
131 uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
132 int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);
133
134 /* model specific additions to generic pll1 and pll2 set up above */
135 if (nv3035) {
136 pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
137 (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
138 pll2 = 0;
139 }
140 if (chip_version > 0x40 && reg1 >= 0x680508) { /* !nv40 */
141 oldramdac580 = nv_rd32(priv, 0x680580);
142 ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
143 if (oldramdac580 != ramdac580)
144 oldpll1 = ~0; /* force mismatch */
145 if (single_stage)
146 /* magic value used by nvidia in single stage mode */
147 pll2 |= 0x011f;
148 }
149 if (chip_version > 0x70)
150 /* magic bits set by the blob (but not the bios) on g71-73 */
151 pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;
152
153 if (oldpll1 == pll1 && oldpll2 == pll2)
154 return; /* already set */
155
156 if (shift_powerctrl_1 >= 0) {
157 saved_powerctrl_1 = nv_rd32(priv, 0x001584);
158 nv_wr32(priv, 0x001584,
159 (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
160 1 << shift_powerctrl_1);
161 }
162
163 if (chip_version >= 0x40) {
164 int shift_c040 = 14;
165
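		/* intentional fall-through, as in powerctrl_1_shift():
		 * each case adds 2, giving shifts of 22/20/18/16 for
		 * 0x680504/0x680500/0x680520/0x680508 respectively */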
166 switch (reg1) {
167 case 0x680504:
168 shift_c040 += 2;
169 case 0x680500:
170 shift_c040 += 2;
171 case 0x680520:
172 shift_c040 += 2;
173 case 0x680508:
174 shift_c040 += 2;
175 }
176
177 savedc040 = nv_rd32(priv, 0xc040);
178 if (shift_c040 != 14)
179 nv_wr32(priv, 0xc040, savedc040 & ~(3 << shift_c040));
180 }
181
182 if (oldramdac580 != ramdac580)
183 nv_wr32(priv, 0x680580, ramdac580);
184
185 if (!nv3035)
186 nv_wr32(priv, reg2, pll2);
187 nv_wr32(priv, reg1, pll1);
188
189 if (shift_powerctrl_1 >= 0)
190 nv_wr32(priv, 0x001584, saved_powerctrl_1);
191 if (chip_version >= 0x40)
192 nv_wr32(priv, 0xc040, savedc040);
193}
194
195static void
196setPLL_double_lowregs(struct nv04_clock_priv *priv, u32 NMNMreg,
197 struct nouveau_pll_vals *pv)
198{
199 /* When setting PLLs, there is a merry game of disabling and enabling
200 * various bits of hardware during the process. This function is a
201 * synthesis of six nv4x traces, nearly each card doing a subtly
202 * different thing. With luck all the necessary bits for each card are
203 * combined herein. Without luck it deviates from each card's formula
204 * so as to not work on any :)
205 */
206
207 uint32_t Preg = NMNMreg - 4;
208 bool mpll = Preg == 0x4020;
209 uint32_t oldPval = nv_rd32(priv, Preg);
210 uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
211 uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
212 0xc << 28 | pv->log2P << 16;
213 uint32_t saved4600 = 0;
214 /* some cards have different maskc040s */
215 uint32_t maskc040 = ~(3 << 14), savedc040;
216 bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
217
218 if (nv_rd32(priv, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
219 return;
220
221 if (Preg == 0x4000)
222 maskc040 = ~0x333;
223 if (Preg == 0x4058)
224 maskc040 = ~(0xc << 24);
225
226 if (mpll) {
227 struct nvbios_pll info;
228 uint8_t Pval2;
229
230 if (nvbios_pll_parse(nouveau_bios(priv), Preg, &info))
231 return;
232
233 Pval2 = pv->log2P + info.bias_p;
234 if (Pval2 > info.max_p)
235 Pval2 = info.max_p;
236 Pval |= 1 << 28 | Pval2 << 20;
237
238 saved4600 = nv_rd32(priv, 0x4600);
239 nv_wr32(priv, 0x4600, saved4600 | 8 << 28);
240 }
241 if (single_stage)
242 Pval |= mpll ? 1 << 12 : 1 << 8;
243
244 nv_wr32(priv, Preg, oldPval | 1 << 28);
245 nv_wr32(priv, Preg, Pval & ~(4 << 28));
246 if (mpll) {
247 Pval |= 8 << 20;
248 nv_wr32(priv, 0x4020, Pval & ~(0xc << 28));
249 nv_wr32(priv, 0x4038, Pval & ~(0xc << 28));
250 }
251
252 savedc040 = nv_rd32(priv, 0xc040);
253 nv_wr32(priv, 0xc040, savedc040 & maskc040);
254
255 nv_wr32(priv, NMNMreg, NMNM);
256 if (NMNMreg == 0x4024)
257 nv_wr32(priv, 0x403c, NMNM);
258
259 nv_wr32(priv, Preg, Pval);
260 if (mpll) {
261 Pval &= ~(8 << 20);
262 nv_wr32(priv, 0x4020, Pval);
263 nv_wr32(priv, 0x4038, Pval);
264 nv_wr32(priv, 0x4600, saved4600);
265 }
266
267 nv_wr32(priv, 0xc040, savedc040);
268
269 if (mpll) {
270 nv_wr32(priv, 0x4020, Pval & ~(1 << 28));
271 nv_wr32(priv, 0x4038, Pval & ~(1 << 28));
272 }
273}
274
275int
276nv04_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
277{
278 struct nv04_clock_priv *priv = (void *)clk;
279 struct nouveau_pll_vals pv;
280 struct nvbios_pll info;
281 int ret;
282
283 ret = nvbios_pll_parse(nouveau_bios(priv), type > 0x405c ?
284 type : type - 4, &info);
285 if (ret)
286 return ret;
287
288 ret = clk->pll_calc(clk, &info, freq, &pv);
289 if (!ret)
290 return ret;
291
292 return clk->pll_prog(clk, type, &pv);
293}
294
295int
296nv04_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
297 int clk, struct nouveau_pll_vals *pv)
298{
299 int N1, M1, N2, M2, P;
300 int ret = nv04_pll_calc(clock, info, clk, &N1, &M1, &N2, &M2, &P);
301 if (ret) {
302 pv->refclk = info->refclk;
303 pv->N1 = N1;
304 pv->M1 = M1;
305 pv->N2 = N2;
306 pv->M2 = M2;
307 pv->log2P = P;
308 }
309 return ret;
310}
311
312int
313nv04_clock_pll_prog(struct nouveau_clock *clk, u32 reg1,
314 struct nouveau_pll_vals *pv)
315{
316 struct nv04_clock_priv *priv = (void *)clk;
317 int cv = nouveau_bios(clk)->version.chip;
318
319 if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
320 cv >= 0x40) {
321 if (reg1 > 0x405c)
322 setPLL_double_highregs(priv, reg1, pv);
323 else
324 setPLL_double_lowregs(priv, reg1, pv);
325 } else
326 setPLL_single(priv, reg1, pv);
327
328 return 0;
329}
330
331static int
332nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
333 struct nouveau_oclass *oclass, void *data, u32 size,
334 struct nouveau_object **pobject)
335{
336 struct nv04_clock_priv *priv;
337 int ret;
338
339 ret = nouveau_clock_create(parent, engine, oclass, &priv);
340 *pobject = nv_object(priv);
341 if (ret)
342 return ret;
343
344 priv->base.pll_set = nv04_clock_pll_set;
345 priv->base.pll_calc = nv04_clock_pll_calc;
346 priv->base.pll_prog = nv04_clock_pll_prog;
347 return 0;
348}
349
350struct nouveau_oclass
351nv04_clock_oclass = {
352 .handle = NV_SUBDEV(CLOCK, 0x04),
353 .ofuncs = &(struct nouveau_ofuncs) {
354 .ctor = nv04_clock_ctor,
355 .dtor = _nouveau_clock_dtor,
356 .init = _nouveau_clock_init,
357 .fini = _nouveau_clock_fini,
358 },
359};
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
index c82de98fee0e..a4b2b7ebf9af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,34 +22,38 @@
  * Authors: Ben Skeggs
  */
 
-#ifndef __NOUVEAU_RAMHT_H__
-#define __NOUVEAU_RAMHT_H__
+#include <subdev/clock.h>
 
-struct nouveau_ramht_entry {
-	struct list_head head;
-	struct nouveau_channel *channel;
-	struct nouveau_gpuobj *gpuobj;
-	u32 handle;
+struct nv40_clock_priv {
+	struct nouveau_clock base;
 };
 
-struct nouveau_ramht {
-	struct drm_device *dev;
-	struct kref refcount;
-	spinlock_t lock;
-	struct nouveau_gpuobj *gpuobj;
-	struct list_head entries;
-	int bits;
-};
+static int
+nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv40_clock_priv *priv;
+	int ret;
 
-extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *,
-			     struct nouveau_ramht **);
-extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
-			      struct nouveau_channel *unref_channel);
+	ret = nouveau_clock_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
 
-extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
-				struct nouveau_gpuobj *);
-extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
-extern struct nouveau_gpuobj *
-nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
+	priv->base.pll_set = nv04_clock_pll_set;
+	priv->base.pll_calc = nv04_clock_pll_calc;
+	priv->base.pll_prog = nv04_clock_pll_prog;
+	return 0;
+}
 
-#endif
+struct nouveau_oclass
+nv40_clock_oclass = {
+	.handle = NV_SUBDEV(CLOCK, 0x40),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv40_clock_ctor,
+		.dtor = _nouveau_clock_dtor,
+		.init = _nouveau_clock_init,
+		.fini = _nouveau_clock_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
new file mode 100644
index 000000000000..fd181fbceddb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -0,0 +1,105 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/clock.h>
26#include <subdev/bios.h>
27#include <subdev/bios/pll.h>
28
29#include "pll.h"
30
31struct nv50_clock_priv {
32 struct nouveau_clock base;
33};
34
35static int
36nv50_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
37{
38 struct nv50_clock_priv *priv = (void *)clk;
39 struct nouveau_bios *bios = nouveau_bios(priv);
40 struct nvbios_pll info;
41 int N1, M1, N2, M2, P;
42 int ret;
43
44 ret = nvbios_pll_parse(bios, type, &info);
45 if (ret) {
46 nv_error(clk, "failed to retrieve pll data, %d\n", ret);
47 return ret;
48 }
49
50 ret = nv04_pll_calc(clk, &info, freq, &N1, &M1, &N2, &M2, &P);
51 if (!ret) {
52 nv_error(clk, "failed pll calculation\n");
53 return ret;
54 }
55
56 switch (info.type) {
57 case PLL_VPLL0:
58 case PLL_VPLL1:
59 nv_wr32(priv, info.reg + 0, 0x10000611);
60 nv_mask(priv, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
61 nv_mask(priv, info.reg + 8, 0x7fff00ff, (P << 28) |
62 (M2 << 16) | N2);
63 break;
64 case PLL_MEMORY:
65 nv_mask(priv, info.reg + 0, 0x01ff0000, (P << 22) |
66 (info.bias_p << 19) |
67 (P << 16));
68 nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
69 break;
70 default:
71 nv_mask(priv, info.reg + 0, 0x00070000, (P << 16));
72 nv_wr32(priv, info.reg + 4, (N1 << 8) | M1);
73 break;
74 }
75
76 return 0;
77}
78
79static int
80nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
81 struct nouveau_oclass *oclass, void *data, u32 size,
82 struct nouveau_object **pobject)
83{
84 struct nv50_clock_priv *priv;
85 int ret;
86
87 ret = nouveau_clock_create(parent, engine, oclass, &priv);
88 *pobject = nv_object(priv);
89 if (ret)
90 return ret;
91
92 priv->base.pll_set = nv50_clock_pll_set;
93 return 0;
94}
95
96struct nouveau_oclass
97nv50_clock_oclass = {
98 .handle = NV_SUBDEV(CLOCK, 0x50),
99 .ofuncs = &(struct nouveau_ofuncs) {
100 .ctor = nv50_clock_ctor,
101 .dtor = _nouveau_clock_dtor,
102 .init = _nouveau_clock_init,
103 .fini = _nouveau_clock_fini,
104 },
105};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
new file mode 100644
index 000000000000..cc8d7d162d7c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -0,0 +1,95 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/clock.h>
26#include <subdev/bios.h>
27#include <subdev/bios/pll.h>
28
29#include "pll.h"
30
31struct nva3_clock_priv {
32 struct nouveau_clock base;
33};
34
35static int
36nva3_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
37{
38 struct nva3_clock_priv *priv = (void *)clk;
39 struct nouveau_bios *bios = nouveau_bios(priv);
40 struct nvbios_pll info;
41 int N, fN, M, P;
42 int ret;
43
44 ret = nvbios_pll_parse(bios, type, &info);
45 if (ret)
46 return ret;
47
48 ret = nva3_pll_calc(clk, &info, freq, &N, &fN, &M, &P);
49 if (ret < 0)
50 return ret;
51
52 switch (info.type) {
53 case PLL_VPLL0:
54 case PLL_VPLL1:
55 nv_wr32(priv, info.reg + 0, 0x50000610);
56 nv_mask(priv, info.reg + 4, 0x003fffff,
57 (P << 16) | (M << 8) | N);
58 nv_wr32(priv, info.reg + 8, fN);
59 break;
60 default:
61		nv_warn(priv, "0x%08x/%dkHz unimplemented\n", type, freq);
62 ret = -EINVAL;
63 break;
64 }
65
66 return ret;
67}
68
69static int
70nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
71 struct nouveau_oclass *oclass, void *data, u32 size,
72 struct nouveau_object **pobject)
73{
74 struct nva3_clock_priv *priv;
75 int ret;
76
77 ret = nouveau_clock_create(parent, engine, oclass, &priv);
78 *pobject = nv_object(priv);
79 if (ret)
80 return ret;
81
82 priv->base.pll_set = nva3_clock_pll_set;
83 return 0;
84}
85
86struct nouveau_oclass
87nva3_clock_oclass = {
88 .handle = NV_SUBDEV(CLOCK, 0xa3),
89 .ofuncs = &(struct nouveau_ofuncs) {
90 .ctor = nva3_clock_ctor,
91 .dtor = _nouveau_clock_dtor,
92 .init = _nouveau_clock_init,
93 .fini = _nouveau_clock_fini,
94 },
95};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
new file mode 100644
index 000000000000..5ccce0b17bf3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -0,0 +1,94 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/clock.h>
26#include <subdev/bios.h>
27#include <subdev/bios/pll.h>
28
29#include "pll.h"
30
31struct nvc0_clock_priv {
32 struct nouveau_clock base;
33};
34
35static int
36nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
37{
38 struct nvc0_clock_priv *priv = (void *)clk;
39 struct nouveau_bios *bios = nouveau_bios(priv);
40 struct nvbios_pll info;
41 int N, fN, M, P;
42 int ret;
43
44 ret = nvbios_pll_parse(bios, type, &info);
45 if (ret)
46 return ret;
47
48 ret = nva3_pll_calc(clk, &info, freq, &N, &fN, &M, &P);
49 if (ret < 0)
50 return ret;
51
52 switch (info.type) {
53 case PLL_VPLL0:
54 case PLL_VPLL1:
55 nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
56 nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
57 nv_wr32(priv, info.reg + 0x10, fN << 16);
58 break;
59 default:
60		nv_warn(priv, "0x%08x/%dkHz unimplemented\n", type, freq);
61 ret = -EINVAL;
62 break;
63 }
64
65 return ret;
66}
67
68static int
69nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
70 struct nouveau_oclass *oclass, void *data, u32 size,
71 struct nouveau_object **pobject)
72{
73 struct nvc0_clock_priv *priv;
74 int ret;
75
76 ret = nouveau_clock_create(parent, engine, oclass, &priv);
77 *pobject = nv_object(priv);
78 if (ret)
79 return ret;
80
81 priv->base.pll_set = nvc0_clock_pll_set;
82 return 0;
83}
84
85struct nouveau_oclass
86nvc0_clock_oclass = {
87 .handle = NV_SUBDEV(CLOCK, 0xc0),
88 .ofuncs = &(struct nouveau_ofuncs) {
89 .ctor = nvc0_clock_ctor,
90 .dtor = _nouveau_clock_dtor,
91 .init = _nouveau_clock_init,
92 .fini = _nouveau_clock_fini,
93 },
94};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h b/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h
new file mode 100644
index 000000000000..ef2c0078f337
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pll.h
@@ -0,0 +1,9 @@
1#ifndef __NOUVEAU_PLL_H__
2#define __NOUVEAU_PLL_H__
3
4int nv04_pll_calc(struct nouveau_clock *, struct nvbios_pll *, u32 freq,
5 int *N1, int *M1, int *N2, int *M2, int *P);
6int nva3_pll_calc(struct nouveau_clock *, struct nvbios_pll *, u32 freq,
7 int *N, int *fN, int *M, int *P);
8
9#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
new file mode 100644
index 000000000000..a2ab6d051ba8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnv04.c
@@ -0,0 +1,242 @@
1/*
2 * Copyright 1993-2003 NVIDIA, Corporation
3 * Copyright 2007-2009 Stuart Bennett
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
19 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
20 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include <subdev/clock.h>
25#include <subdev/bios.h>
26#include <subdev/bios/pll.h>
27
28#include "pll.h"
29
30static int
31getMNP_single(struct nouveau_clock *clock, struct nvbios_pll *info, int clk,
32 int *pN, int *pM, int *pP)
33{
34 /* Find M, N and P for a single stage PLL
35 *
36 * Note that some bioses (NV3x) have lookup tables of precomputed MNP
37 * values, but we're too lazy to use those atm
38 *
39 * "clk" parameter in kHz
40 * returns calculated clock
41 */
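
	/* The relation searched for below, with illustrative numbers
	 * rather than values from any real board: the output clock is
	 * crystal * N / M / 2^P, so crystal = 27000kHz, N = 100, M = 6,
	 * P = 2 gives a 450000kHz VCO and a 112500kHz output. */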
42 int cv = nouveau_bios(clock)->version.chip;
43 int minvco = info->vco1.min_freq, maxvco = info->vco1.max_freq;
44 int minM = info->vco1.min_m, maxM = info->vco1.max_m;
45 int minN = info->vco1.min_n, maxN = info->vco1.max_n;
46 int minU = info->vco1.min_inputfreq;
47 int maxU = info->vco1.max_inputfreq;
48 int minP = info->min_p;
49 int maxP = info->max_p_usable;
50 int crystal = info->refclk;
51 int M, N, thisP, P;
52 int clkP, calcclk;
53 int delta, bestdelta = INT_MAX;
54 int bestclk = 0;
55
56 /* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
57 /* possibly correlated with introduction of 27MHz crystal */
58 if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
59 if (clk > 250000)
60 maxM = 6;
61 if (clk > 340000)
62 maxM = 2;
63 } else if (cv < 0x40) {
64 if (clk > 150000)
65 maxM = 6;
66 if (clk > 200000)
67 maxM = 4;
68 if (clk > 340000)
69 maxM = 2;
70 }
71
72 P = 1 << maxP;
73 if ((clk * P) < minvco) {
74 minvco = clk * maxP;
75 maxvco = minvco * 2;
76 }
77
78 if (clk + clk/200 > maxvco) /* +0.5% */
79 maxvco = clk + clk/200;
80
81 /* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
82 for (thisP = minP; thisP <= maxP; thisP++) {
83 P = 1 << thisP;
84 clkP = clk * P;
85
86 if (clkP < minvco)
87 continue;
88 if (clkP > maxvco)
89 return bestclk;
90
91 for (M = minM; M <= maxM; M++) {
92 if (crystal/M < minU)
93 return bestclk;
94 if (crystal/M > maxU)
95 continue;
96
97 /* add crystal/2 to round better */
98 N = (clkP * M + crystal/2) / crystal;
99
100 if (N < minN)
101 continue;
102 if (N > maxN)
103 break;
104
105 /* more rounding additions */
106 calcclk = ((N * crystal + P/2) / P + M/2) / M;
107 delta = abs(calcclk - clk);
108 /* we do an exhaustive search rather than terminating
109 * on an optimality condition...
110 */
111 if (delta < bestdelta) {
112 bestdelta = delta;
113 bestclk = calcclk;
114 *pN = N;
115 *pM = M;
116 *pP = thisP;
117 if (delta == 0) /* except this one */
118 return bestclk;
119 }
120 }
121 }
122
123 return bestclk;
124}
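
Two of the computations above lean on a rounded-division idiom: N is derived as (clkP * M + crystal/2) / crystal, and calcclk folds in P/2 and M/2 the same way. A standalone illustration of the idiom (hypothetical helper name, for exposition only):

/* (a + b/2) / b rounds a/b to the nearest integer rather than
 * truncating toward zero, for non-negative a and b.
 */
static inline int div_round(int a, int b)
{
	return (a + b / 2) / b;
}
/* e.g. div_round(7, 2) == 4, whereas plain 7 / 2 == 3 */
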
125
126static int
127getMNP_double(struct nouveau_clock *clock, struct nvbios_pll *info, int clk,
128 int *pN1, int *pM1, int *pN2, int *pM2, int *pP)
129{
130 /* Find M, N and P for a two stage PLL
131 *
132 * Note that some bioses (NV30+) have lookup tables of precomputed MNP
133 * values, but we're too lazy to use those atm
134 *
135 * "clk" parameter in kHz
136 * returns calculated clock
137 */
138 int chip_version = nouveau_bios(clock)->version.chip;
139 int minvco1 = info->vco1.min_freq, maxvco1 = info->vco1.max_freq;
140 int minvco2 = info->vco2.min_freq, maxvco2 = info->vco2.max_freq;
141 int minU1 = info->vco1.min_inputfreq, minU2 = info->vco2.min_inputfreq;
142 int maxU1 = info->vco1.max_inputfreq, maxU2 = info->vco2.max_inputfreq;
143 int minM1 = info->vco1.min_m, maxM1 = info->vco1.max_m;
144 int minN1 = info->vco1.min_n, maxN1 = info->vco1.max_n;
145 int minM2 = info->vco2.min_m, maxM2 = info->vco2.max_m;
146 int minN2 = info->vco2.min_n, maxN2 = info->vco2.max_n;
147 int maxlog2P = info->max_p_usable;
148 int crystal = info->refclk;
149 bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
150 int M1, N1, M2, N2, log2P;
151 int clkP, calcclk1, calcclk2, calcclkout;
152 int delta, bestdelta = INT_MAX;
153 int bestclk = 0;
154
155 int vco2 = (maxvco2 - maxvco2/200) / 2;
156 for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
157 ;
158 clkP = clk << log2P;
159
160 if (maxvco2 < clk + clk/200) /* +0.5% */
161 maxvco2 = clk + clk/200;
162
163 for (M1 = minM1; M1 <= maxM1; M1++) {
164 if (crystal/M1 < minU1)
165 return bestclk;
166 if (crystal/M1 > maxU1)
167 continue;
168
169 for (N1 = minN1; N1 <= maxN1; N1++) {
170 calcclk1 = crystal * N1 / M1;
171 if (calcclk1 < minvco1)
172 continue;
173 if (calcclk1 > maxvco1)
174 break;
175
176 for (M2 = minM2; M2 <= maxM2; M2++) {
177 if (calcclk1/M2 < minU2)
178 break;
179 if (calcclk1/M2 > maxU2)
180 continue;
181
182 /* add calcclk1/2 to round better */
183 N2 = (clkP * M2 + calcclk1/2) / calcclk1;
184 if (N2 < minN2)
185 continue;
186 if (N2 > maxN2)
187 break;
188
189 if (!fixedgain2) {
190 if (chip_version < 0x60)
191 if (N2/M2 < 4 || N2/M2 > 10)
192 continue;
193
194 calcclk2 = calcclk1 * N2 / M2;
195 if (calcclk2 < minvco2)
196 break;
197 if (calcclk2 > maxvco2)
198 continue;
199 } else
200 calcclk2 = calcclk1;
201
202 calcclkout = calcclk2 >> log2P;
203 delta = abs(calcclkout - clk);
204 /* we do an exhaustive search rather than terminating
205 * on an optimality condition...
206 */
207 if (delta < bestdelta) {
208 bestdelta = delta;
209 bestclk = calcclkout;
210 *pN1 = N1;
211 *pM1 = M1;
212 *pN2 = N2;
213 *pM2 = M2;
214 *pP = log2P;
215 if (delta == 0) /* except this one */
216 return bestclk;
217 }
218 }
219 }
220 }
221
222 return bestclk;
223}
224
225int
226nv04_pll_calc(struct nouveau_clock *clk, struct nvbios_pll *info, u32 freq,
227 int *N1, int *M1, int *N2, int *M2, int *P)
228{
229 int ret;
230
231 if (!info->vco2.max_freq) {
232 ret = getMNP_single(clk, info, freq, N1, M1, P);
233 *N2 = 1;
234 *M2 = 1;
235 } else {
236 ret = getMNP_double(clk, info, freq, N1, M1, N2, M2, P);
237 }
238
239 if (!ret)
240 nv_error(clk, "unable to compute acceptable pll values\n");
241 return ret;
242}
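
Both searches in this file optimise closed-form relations: a single-stage PLL produces refclk * N / M / 2^P, and a two-stage PLL feeds VCO1 (refclk * N1 / M1) into a second N2/M2 stage before the post-divider. A self-contained check with invented coefficients (real limits come from the VBIOS pll table; the rounding terms above are omitted here for clarity):

/* Standalone sketch of the frequency relations searched above.
 * All coefficients are invented for illustration.
 */
#include <stdio.h>

int main(void)
{
	int crystal = 27000;			/* kHz reference */
	int N = 100, M = 3, P = 2;		/* single-stage */
	int N1 = 50, M1 = 1, N2 = 8, M2 = 3;	/* two-stage */

	/* single stage: fout = refclk * N / M / 2^P */
	printf("single: %d kHz\n", crystal * N / M >> P);

	/* two stage: vco1 = refclk * N1 / M1; fout = vco1 * N2 / M2 / 2^P */
	printf("double: %d kHz\n", (crystal * N1 / M1) * N2 / M2 >> P);
	return 0;
}
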
diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
index 4d019eb76f7d..eed5c16cf610 100644
--- a/drivers/gpu/drm/nouveau/nv50_calc.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/pllnva3.c
@@ -22,60 +22,43 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_hw.h"
+#include <subdev/clock.h>
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
 
-int
-nv50_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
-	      int *N1, int *M1, int *N2, int *M2, int *P)
-{
-	struct nouveau_pll_vals pll_vals;
-	int ret;
-
-	ret = nouveau_calc_pll_mnp(dev, pll, clk, &pll_vals);
-	if (ret <= 0)
-		return ret;
-
-	*N1 = pll_vals.N1;
-	*M1 = pll_vals.M1;
-	*N2 = pll_vals.N2;
-	*M2 = pll_vals.M2;
-	*P = pll_vals.log2P;
-	return ret;
-}
+#include "pll.h"
 
 int
-nva3_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
-	      int *pN, int *pfN, int *pM, int *P)
+nva3_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
+	      u32 freq, int *pN, int *pfN, int *pM, int *P)
 {
 	u32 best_err = ~0, err;
 	int M, lM, hM, N, fN;
 
-	*P = pll->vco1.maxfreq / clk;
-	if (*P > pll->max_p)
-		*P = pll->max_p;
-	if (*P < pll->min_p)
-		*P = pll->min_p;
+	*P = info->vco1.max_freq / freq;
+	if (*P > info->max_p)
+		*P = info->max_p;
+	if (*P < info->min_p)
+		*P = info->min_p;
 
-	lM = (pll->refclk + pll->vco1.max_inputfreq) / pll->vco1.max_inputfreq;
-	lM = max(lM, (int)pll->vco1.min_m);
-	hM = (pll->refclk + pll->vco1.min_inputfreq) / pll->vco1.min_inputfreq;
-	hM = min(hM, (int)pll->vco1.max_m);
+	lM = (info->refclk + info->vco1.max_inputfreq) / info->vco1.max_inputfreq;
+	lM = max(lM, (int)info->vco1.min_m);
+	hM = (info->refclk + info->vco1.min_inputfreq) / info->vco1.min_inputfreq;
+	hM = min(hM, (int)info->vco1.max_m);
 
 	for (M = lM; M <= hM; M++) {
-		u32 tmp = clk * *P * M;
-		N = tmp / pll->refclk;
-		fN = tmp % pll->refclk;
-		if (!pfN && fN >= pll->refclk / 2)
+		u32 tmp = freq * *P * M;
+		N = tmp / info->refclk;
+		fN = tmp % info->refclk;
+		if (!pfN && fN >= info->refclk / 2)
 			N++;
 
-		if (N < pll->vco1.min_n)
+		if (N < info->vco1.min_n)
 			continue;
-		if (N > pll->vco1.max_n)
+		if (N > info->vco1.max_n)
 			break;
 
-		err = abs(clk - (pll->refclk * N / M / *P));
+		err = abs(freq - (info->refclk * N / M / *P));
 		if (err < best_err) {
 			best_err = err;
 			*pN = N;
@@ -83,15 +66,15 @@ nva3_calc_pll(struct drm_device *dev, struct pll_lims *pll, int clk,
 		}
 
 		if (pfN) {
-			*pfN = (((fN << 13) / pll->refclk) - 4096) & 0xffff;
-			return clk;
+			*pfN = (((fN << 13) / info->refclk) - 4096) & 0xffff;
+			return freq;
 		}
 	}
 
 	if (unlikely(best_err == ~0)) {
-		NV_ERROR(dev, "unable to find matching pll values\n");
+		nv_error(clock, "unable to find matching pll values\n");
 		return -EINVAL;
 	}
 
-	return pll->refclk * *pN / *pM / *P;
+	return info->refclk * *pN / *pM / *P;
 }
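
When the caller passes pfN, nva3_pll_calc() returns the division remainder encoded as a 13-bit fraction biased by 4096, so the hardware can realise N plus a fractional part instead of rounding. A sketch of the encoding in isolation (hypothetical helper name; note that a remainder of exactly refclk/2 encodes as 0, the bias midpoint):

/* Illustrative round-trip of the fractional-N encoding above.
 * fN is the remainder of (freq * *P * M) / refclk.
 */
static int example_encode_fn(int fN, int refclk)
{
	return (((fN << 13) / refclk) - 4096) & 0xffff;
}
/* example_encode_fn(refclk / 2, refclk) == 0 for even refclk */
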
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
new file mode 100644
index 000000000000..ca9a4648bd8a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -0,0 +1,472 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/device.h>
27#include <core/client.h>
28#include <core/device.h>
29#include <core/option.h>
30
31#include <core/class.h>
32
33#include <subdev/device.h>
34
35static DEFINE_MUTEX(nv_devices_mutex);
36static LIST_HEAD(nv_devices);
37
38struct nouveau_device *
39nouveau_device_find(u64 name)
40{
41 struct nouveau_device *device, *match = NULL;
42 mutex_lock(&nv_devices_mutex);
43 list_for_each_entry(device, &nv_devices, head) {
44 if (device->handle == name) {
45 match = device;
46 break;
47 }
48 }
49 mutex_unlock(&nv_devices_mutex);
50 return match;
51}
52
53/******************************************************************************
54 * nouveau_devobj (0x0080): class implementation
55 *****************************************************************************/
56struct nouveau_devobj {
57 struct nouveau_parent base;
58 struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
59 bool created;
60};
61
62static const u64 disable_map[] = {
63 [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS,
64 [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE,
65 [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE,
66 [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
67 [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
68 [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
69 [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
70 [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
71 [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE,
72 [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE,
73 [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE,
74 [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
75 [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE,
76 [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE,
77 [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH,
78 [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG,
79 [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME,
80 [NVDEV_ENGINE_VP] = NV_DEVICE_DISABLE_VP,
81 [NVDEV_ENGINE_CRYPT] = NV_DEVICE_DISABLE_CRYPT,
82 [NVDEV_ENGINE_BSP] = NV_DEVICE_DISABLE_BSP,
83 [NVDEV_ENGINE_PPP] = NV_DEVICE_DISABLE_PPP,
84 [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0,
85 [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1,
86 [NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1,
87 [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
88 [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP,
89 [NVDEV_SUBDEV_NR] = 0,
90};
91
92static int
93nouveau_devobj_ctor(struct nouveau_object *parent,
94 struct nouveau_object *engine,
95 struct nouveau_oclass *oclass, void *data, u32 size,
96 struct nouveau_object **pobject)
97{
98 struct nouveau_client *client = nv_client(parent);
99 struct nouveau_device *device;
100 struct nouveau_devobj *devobj;
101 struct nv_device_class *args = data;
102 u64 disable, boot0, strap;
103 u64 mmio_base, mmio_size;
104 void __iomem *map;
105 int ret, i, c;
106
107 if (size < sizeof(struct nv_device_class))
108 return -EINVAL;
109
110 /* find the device subdev that matches what the client requested */
111 device = nv_device(client->device);
112 if (args->device != ~0) {
113 device = nouveau_device_find(args->device);
114 if (!device)
115 return -ENODEV;
116 }
117
118 ret = nouveau_parent_create(parent, nv_object(device), oclass, 0, NULL,
119 (1ULL << NVDEV_ENGINE_DMAOBJ) |
120 (1ULL << NVDEV_ENGINE_FIFO) |
121 (1ULL << NVDEV_ENGINE_DISP), &devobj);
122 *pobject = nv_object(devobj);
123 if (ret)
124 return ret;
125
126 mmio_base = pci_resource_start(device->pdev, 0);
127 mmio_size = pci_resource_len(device->pdev, 0);
128
129 /* translate api disable mask into internal mapping */
130 disable = args->debug0;
131 for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
132 if (args->disable & disable_map[i])
133 disable |= (1ULL << i);
134 }
135
136 /* identify the chipset, and determine classes of subdev/engines */
137 if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) &&
138 !device->card_type) {
139 map = ioremap(mmio_base, 0x102000);
140 if (map == NULL)
141 return -ENOMEM;
142
143 /* switch mmio to cpu's native endianness */
144#ifndef __BIG_ENDIAN
145 if (ioread32_native(map + 0x000004) != 0x00000000)
146#else
147 if (ioread32_native(map + 0x000004) == 0x00000000)
148#endif
149 iowrite32_native(0x01000001, map + 0x000004);
150
151 /* read boot0 and strapping information */
152 boot0 = ioread32_native(map + 0x000000);
153 strap = ioread32_native(map + 0x101000);
154 iounmap(map);
155
156 /* determine chipset and derive architecture from it */
157 if ((boot0 & 0x0f000000) > 0) {
158 device->chipset = (boot0 & 0xff00000) >> 20;
159 switch (device->chipset & 0xf0) {
160 case 0x10: device->card_type = NV_10; break;
161 case 0x20: device->card_type = NV_20; break;
162 case 0x30: device->card_type = NV_30; break;
163 case 0x40:
164 case 0x60: device->card_type = NV_40; break;
165 case 0x50:
166 case 0x80:
167 case 0x90:
168 case 0xa0: device->card_type = NV_50; break;
169 case 0xc0: device->card_type = NV_C0; break;
170 case 0xd0: device->card_type = NV_D0; break;
171 case 0xe0: device->card_type = NV_E0; break;
172 default:
173 break;
174 }
175 } else
176 if ((boot0 & 0xff00fff0) == 0x20004000) {
177 if (boot0 & 0x00f00000)
178 device->chipset = 0x05;
179 else
180 device->chipset = 0x04;
181 device->card_type = NV_04;
182 }
183
184 switch (device->card_type) {
185 case NV_04: ret = nv04_identify(device); break;
186 case NV_10: ret = nv10_identify(device); break;
187 case NV_20: ret = nv20_identify(device); break;
188 case NV_30: ret = nv30_identify(device); break;
189 case NV_40: ret = nv40_identify(device); break;
190 case NV_50: ret = nv50_identify(device); break;
191 case NV_C0:
192 case NV_D0: ret = nvc0_identify(device); break;
193 case NV_E0: ret = nve0_identify(device); break;
194 default:
195 ret = -EINVAL;
196 break;
197 }
198
199 if (ret) {
200 nv_error(device, "unknown chipset, 0x%08x\n", boot0);
201 return ret;
202 }
203
204 nv_info(device, "BOOT0 : 0x%08x\n", boot0);
205 nv_info(device, "Chipset: %s (NV%02X)\n",
206 device->cname, device->chipset);
207 nv_info(device, "Family : NV%02X\n", device->card_type);
208
209 /* determine frequency of timing crystal */
210 if ( device->chipset < 0x17 ||
211 (device->chipset >= 0x20 && device->chipset <= 0x25))
212 strap &= 0x00000040;
213 else
214 strap &= 0x00400040;
215
216 switch (strap) {
217 case 0x00000000: device->crystal = 13500; break;
218 case 0x00000040: device->crystal = 14318; break;
219 case 0x00400000: device->crystal = 27000; break;
220 case 0x00400040: device->crystal = 25000; break;
221 }
222
223 nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
224 }
225
226 if (!(args->disable & NV_DEVICE_DISABLE_MMIO) &&
227 !nv_subdev(device)->mmio) {
228 nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
229 if (!nv_subdev(device)->mmio) {
230 nv_error(device, "unable to map device registers\n");
231 return -ENOMEM;
232 }
233 }
234
235 /* ensure requested subsystems are available for use */
236 for (i = 0, c = 0; i < NVDEV_SUBDEV_NR; i++) {
237 if (!(oclass = device->oclass[i]) || (disable & (1ULL << i)))
238 continue;
239
240 if (!device->subdev[i]) {
241 ret = nouveau_object_ctor(nv_object(device), NULL,
242 oclass, NULL, i,
243 &devobj->subdev[i]);
244 if (ret == -ENODEV)
245 continue;
246 if (ret)
247 return ret;
248
249 if (nv_iclass(devobj->subdev[i], NV_ENGINE_CLASS))
250 nouveau_subdev_reset(devobj->subdev[i]);
251 } else {
252 nouveau_object_ref(device->subdev[i],
253 &devobj->subdev[i]);
254 }
255
256 /* note: can't init *any* subdevs until devinit has been run
257 * due to not knowing exactly what the vbios init tables will
258 * mess with. devinit also can't be run until all of its
259 * dependencies have been created.
260 *
261 * this code delays init of any subdev until all of devinit's
262 * dependencies have been created, and then initialises each
263 * subdev in turn as they're created.
264 */
265 while (i >= NVDEV_SUBDEV_DEVINIT_LAST && c <= i) {
266 struct nouveau_object *subdev = devobj->subdev[c++];
267 if (subdev && !nv_iclass(subdev, NV_ENGINE_CLASS)) {
268 ret = nouveau_object_inc(subdev);
269 if (ret)
270 return ret;
271 }
272 }
273 }
274
275 return 0;
276}
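
The comment in the ctor above describes a two-cursor scheme: i walks creation order while c trails behind, and nothing is initialised until i has passed NVDEV_SUBDEV_DEVINIT_LAST, i.e. until every dependency of devinit exists. The pattern in isolation, with simplified stand-in types (illustrative only, not part of the patch):

/* Sketch of the deferred-init loop: create in order, but only start
 * initialising once index 'devinit_last' has been created; 'c' then
 * catches up and thereafter tracks 'i'.
 */
static int
example_deferred_init(void *objs[], int nr, int devinit_last,
		      int (*init)(void *obj))
{
	int i, c = 0, ret;

	for (i = 0; i < nr; i++) {
		/* ... create objs[i] here ... */
		while (i >= devinit_last && c <= i) {
			void *obj = objs[c++];
			if (obj && (ret = init(obj)))
				return ret;
		}
	}
	return 0;
}
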
277
278static void
279nouveau_devobj_dtor(struct nouveau_object *object)
280{
281 struct nouveau_devobj *devobj = (void *)object;
282 int i;
283
284 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
285 nouveau_object_ref(NULL, &devobj->subdev[i]);
286
287 nouveau_parent_destroy(&devobj->base);
288}
289
290static int
291nouveau_devobj_init(struct nouveau_object *object)
292{
293 struct nouveau_devobj *devobj = (void *)object;
294 struct nouveau_object *subdev;
295 int ret, i;
296
297 ret = nouveau_parent_init(&devobj->base);
298 if (ret)
299 return ret;
300
301 for (i = 0; devobj->created && i < NVDEV_SUBDEV_NR; i++) {
302 if ((subdev = devobj->subdev[i])) {
303 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
304 ret = nouveau_object_inc(subdev);
305 if (ret)
306 goto fail;
307 }
308 }
309 }
310
311 devobj->created = true;
312 return 0;
313
314fail:
315 for (--i; i >= 0; i--) {
316 if ((subdev = devobj->subdev[i])) {
317 if (!nv_iclass(subdev, NV_ENGINE_CLASS))
318 nouveau_object_dec(subdev, false);
319 }
320 }
321
322 return ret;
323}
324
325static int
326nouveau_devobj_fini(struct nouveau_object *object, bool suspend)
327{
328 struct nouveau_devobj *devobj = (void *)object;
329 struct nouveau_object *subdev;
330 int ret, i;
331
332 for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--) {
333 if ((subdev = devobj->subdev[i])) {
334 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
335 ret = nouveau_object_dec(subdev, suspend);
336 if (ret && suspend)
337 goto fail;
338 }
339 }
340 }
341
342 ret = nouveau_parent_fini(&devobj->base, suspend);
343fail:
344 for (; ret && suspend && i < NVDEV_SUBDEV_NR; i++) {
345 if ((subdev = devobj->subdev[i])) {
346 if (!nv_iclass(subdev, NV_ENGINE_CLASS)) {
347 ret = nouveau_object_inc(subdev);
348 if (ret) {
349 /* XXX */
350 }
351 }
352 }
353 }
354
355 return ret;
356}
357
358static u8
359nouveau_devobj_rd08(struct nouveau_object *object, u32 addr)
360{
361 return nv_rd08(object->engine, addr);
362}
363
364static u16
365nouveau_devobj_rd16(struct nouveau_object *object, u32 addr)
366{
367 return nv_rd16(object->engine, addr);
368}
369
370static u32
371nouveau_devobj_rd32(struct nouveau_object *object, u32 addr)
372{
373 return nv_rd32(object->engine, addr);
374}
375
376static void
377nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data)
378{
379 nv_wr08(object->engine, addr, data);
380}
381
382static void
383nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data)
384{
385 nv_wr16(object->engine, addr, data);
386}
387
388static void
389nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
390{
391 nv_wr32(object->engine, addr, data);
392}
393
394static struct nouveau_ofuncs
395nouveau_devobj_ofuncs = {
396 .ctor = nouveau_devobj_ctor,
397 .dtor = nouveau_devobj_dtor,
398 .init = nouveau_devobj_init,
399 .fini = nouveau_devobj_fini,
400 .rd08 = nouveau_devobj_rd08,
401 .rd16 = nouveau_devobj_rd16,
402 .rd32 = nouveau_devobj_rd32,
403 .wr08 = nouveau_devobj_wr08,
404 .wr16 = nouveau_devobj_wr16,
405 .wr32 = nouveau_devobj_wr32,
406};
407
408/******************************************************************************
409 * nouveau_device: engine functions
410 *****************************************************************************/
411struct nouveau_oclass
412nouveau_device_sclass[] = {
413 { 0x0080, &nouveau_devobj_ofuncs },
414 {}
415};
416
417static void
418nouveau_device_dtor(struct nouveau_object *object)
419{
420 struct nouveau_device *device = (void *)object;
421
422 mutex_lock(&nv_devices_mutex);
423 list_del(&device->head);
424 mutex_unlock(&nv_devices_mutex);
425
426 if (device->base.mmio)
427 iounmap(device->base.mmio);
428
429 nouveau_subdev_destroy(&device->base);
430}
431
432static struct nouveau_oclass
433nouveau_device_oclass = {
434 .handle = NV_SUBDEV(DEVICE, 0x00),
435 .ofuncs = &(struct nouveau_ofuncs) {
436 .dtor = nouveau_device_dtor,
437 },
438};
439
440int
441nouveau_device_create_(struct pci_dev *pdev, u64 name, const char *sname,
442 const char *cfg, const char *dbg,
443 int length, void **pobject)
444{
445 struct nouveau_device *device;
446 int ret = -EEXIST;
447
448 mutex_lock(&nv_devices_mutex);
449 list_for_each_entry(device, &nv_devices, head) {
450 if (device->handle == name)
451 goto done;
452 }
453
454 ret = nouveau_subdev_create_(NULL, NULL, &nouveau_device_oclass, 0,
455 "DEVICE", "device", length, pobject);
456 device = *pobject;
457 if (ret)
458 goto done;
459
460 atomic_set(&nv_object(device)->usecount, 2);
461 device->pdev = pdev;
462 device->handle = name;
463 device->cfgopt = cfg;
464 device->dbgopt = dbg;
465 device->name = sname;
466
467 nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
468 list_add(&device->head, &nv_devices);
469done:
470 mutex_unlock(&nv_devices_mutex);
471 return ret;
472}
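
The chipset identification in the devobj ctor above keys off the BOOT0 register at MMIO offset 0: NV10 and later store the chipset id in bits 27:20, while pre-NV10 boards are recognised by the 0x20004000 signature instead. A condensed sketch of that decode (standalone, with a local u32 typedef standing in for the kernel's types):

/* Illustrative decode of BOOT0 as interpreted by the ctor above. */
typedef unsigned int u32;

static int example_chipset_from_boot0(u32 boot0)
{
	if (boot0 & 0x0f000000)			/* NV10 and later */
		return (boot0 & 0x0ff00000) >> 20;
	if ((boot0 & 0xff00fff0) == 0x20004000)	/* NV04/NV05 */
		return (boot0 & 0x00f00000) ? 0x05 : 0x04;
	return -1;				/* unrecognised */
}
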
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
new file mode 100644
index 000000000000..8626d0d6cbbc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
@@ -0,0 +1,86 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/i2c.h>
28#include <subdev/clock.h>
29#include <subdev/devinit.h>
30#include <subdev/mc.h>
31#include <subdev/timer.h>
32#include <subdev/fb.h>
33#include <subdev/instmem.h>
34#include <subdev/vm.h>
35
36#include <engine/dmaobj.h>
37#include <engine/fifo.h>
38#include <engine/software.h>
39#include <engine/graph.h>
40#include <engine/disp.h>
41
42int
43nv04_identify(struct nouveau_device *device)
44{
45 switch (device->chipset) {
46 case 0x04:
47 device->cname = "NV04";
48 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
49 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
50 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
51 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
52 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
53 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
54 device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
55 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
56 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
57 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
58 device->oclass[NVDEV_ENGINE_FIFO ] = &nv04_fifo_oclass;
59 device->oclass[NVDEV_ENGINE_SW ] = &nv04_software_oclass;
60 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
61 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
62 break;
63 case 0x05:
64 device->cname = "NV05";
65 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
66 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
67 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
68 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
69 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
70 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
71 device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
72 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
73 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
74 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
75 device->oclass[NVDEV_ENGINE_FIFO ] = &nv04_fifo_oclass;
76 device->oclass[NVDEV_ENGINE_SW ] = &nv04_software_oclass;
77 device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
78 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
79 break;
80 default:
81 nv_fatal(device, "unknown RIVA chipset\n");
82 return -EINVAL;
83 }
84
85 return 0;
86}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
new file mode 100644
index 000000000000..f09accfd0e31
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -0,0 +1,195 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/devinit.h>
31#include <subdev/mc.h>
32#include <subdev/timer.h>
33#include <subdev/fb.h>
34#include <subdev/instmem.h>
35#include <subdev/vm.h>
36
37#include <engine/dmaobj.h>
38#include <engine/fifo.h>
39#include <engine/software.h>
40#include <engine/graph.h>
41#include <engine/disp.h>
42
43int
44nv10_identify(struct nouveau_device *device)
45{
46 switch (device->chipset) {
47 case 0x10:
48 device->cname = "NV10";
49 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
50 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
51 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
52 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
53 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
54 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
55 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
56 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
57 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
58 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
59 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
60 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
61 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
62 break;
63 case 0x15:
64 device->cname = "NV15";
65 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
66 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
67 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
68 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
69 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
70 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
71 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
72 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
73 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
74 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
75 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
76 device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
77 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
78 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
79 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
80 break;
81 case 0x16:
82 device->cname = "NV16";
83 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
84 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
85 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
86 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
87 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
88 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
89 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
90 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
91 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
92 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
93 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
94 device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
95 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
96 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
97 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
98 break;
99 case 0x1a:
100 device->cname = "nForce";
101 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
102 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
103 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
104 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
105 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
106 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
107 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
108 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
109 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
110 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
111 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
112 device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
113 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
114 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
115 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
116 break;
117 case 0x11:
118 device->cname = "NV11";
119 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
120 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
121 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
122 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
123 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
124 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
125 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
126 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
127 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
128 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
129 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
130 device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
131 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
132 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
133 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
134 break;
135 case 0x17:
136 device->cname = "NV17";
137 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
138 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
139 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
140 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
141 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
142 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
143 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
144 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
145 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
146 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
147 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
148 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
149 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
150 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
151 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
152 break;
153 case 0x1f:
154 device->cname = "nForce2";
155 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
156 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
157 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
158 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
159 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
160 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
161 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
162 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
163 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
164 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
165 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
166 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
167 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
168 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
169 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
170 break;
171 case 0x18:
172 device->cname = "NV18";
173 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
174 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
175 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
176 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
177 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
178 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
179 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
180 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
181 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
182 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
183 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
184 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
185 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
186 device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
187 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
188 break;
189 default:
190 nv_fatal(device, "unknown Celsius chipset\n");
191 return -EINVAL;
192 }
193
194 return 0;
195}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
new file mode 100644
index 000000000000..5fa58b7369b5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -0,0 +1,126 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/therm.h>
31#include <subdev/devinit.h>
32#include <subdev/mc.h>
33#include <subdev/timer.h>
34#include <subdev/fb.h>
35#include <subdev/instmem.h>
36#include <subdev/vm.h>
37
38#include <engine/dmaobj.h>
39#include <engine/fifo.h>
40#include <engine/software.h>
41#include <engine/graph.h>
42#include <engine/disp.h>
43
44int
45nv20_identify(struct nouveau_device *device)
46{
47 switch (device->chipset) {
48 case 0x20:
49 device->cname = "NV20";
50 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
51 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
52 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
53 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
54 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
55 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
56 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
57 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
58 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
59 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
60 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
61 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
62 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
63 device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass;
64 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
65 break;
66 case 0x25:
67 device->cname = "NV25";
68 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
69 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
70 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
71 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
72 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
73 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
75 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
76 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
79 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
80 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
81 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
82 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
83 break;
84 case 0x28:
85 device->cname = "NV28";
86 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
87 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
88 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
89 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
90 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
91 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
92 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
93 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
94 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
95 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
96 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
97 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
98 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
99 device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
100 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
101 break;
102 case 0x2a:
103 device->cname = "NV2A";
104 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
105 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
106 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
107 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
108 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
109 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
110 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
111 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
112 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
113 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
114 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
115 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
116 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
117 device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass;
118 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
119 break;
120 default:
121 nv_fatal(device, "unknown Kelvin chipset\n");
122 return -EINVAL;
123 }
124
125 return 0;
126}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
new file mode 100644
index 000000000000..7f4b8fe6cccc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -0,0 +1,147 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/devinit.h>
31#include <subdev/mc.h>
32#include <subdev/timer.h>
33#include <subdev/fb.h>
34#include <subdev/instmem.h>
35#include <subdev/vm.h>
36
37#include <engine/dmaobj.h>
38#include <engine/fifo.h>
39#include <engine/software.h>
40#include <engine/graph.h>
41#include <engine/mpeg.h>
42#include <engine/disp.h>
43
44int
45nv30_identify(struct nouveau_device *device)
46{
47 switch (device->chipset) {
48 case 0x30:
49 device->cname = "NV30";
50 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
51 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
52 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
53 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
54 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
55 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
56 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
57 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
58 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
59 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
60 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
61 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
62 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
63 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
64 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
65 break;
66 case 0x35:
67 device->cname = "NV35";
68 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
69 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
70 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
71 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
72 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
73 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
75 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
76 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
79 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
80 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
81 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
82 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
83 break;
84 case 0x31:
85 device->cname = "NV31";
86 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
87 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
88 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
89 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
90 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
91 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
92 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
93 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
94 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
95 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
96 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
97 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
98 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
99 device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
100 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
101 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
102 break;
103 case 0x36:
104 device->cname = "NV36";
105 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
106 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
107 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
108 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
109 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
110 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
111 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
112 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
113 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
114 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
115 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
116 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
117 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
118 device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
119 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
120 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
121 break;
122 case 0x34:
123 device->cname = "NV34";
124 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
125 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
126 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
127 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv04_clock_oclass;
128 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
129 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
130 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
131 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
132 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
133 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
134 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
135 device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
136 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
137 device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass;
138 device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
139 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
140 break;
141 default:
142 nv_fatal(device, "unknown Rankine chipset\n");
143 return -EINVAL;
144 }
145
146 return 0;
147}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
new file mode 100644
index 000000000000..42deadca0f0a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -0,0 +1,375 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/therm.h>
31#include <subdev/devinit.h>
32#include <subdev/mc.h>
33#include <subdev/timer.h>
34#include <subdev/fb.h>
35#include <subdev/instmem.h>
36#include <subdev/vm.h>
37
38#include <engine/dmaobj.h>
39#include <engine/fifo.h>
40#include <engine/software.h>
41#include <engine/graph.h>
42#include <engine/mpeg.h>
43#include <engine/disp.h>
44
45int
46nv40_identify(struct nouveau_device *device)
47{
48 switch (device->chipset) {
49 case 0x40:
50 device->cname = "NV40";
51 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
52 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
53 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
54 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
55 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
56 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
57 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
58 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
59 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
60 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
61 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
62 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
63 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
64 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
65 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
66 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
67 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
68 break;
69 case 0x41:
70 device->cname = "NV41";
71 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
72 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
73 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
74 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
75 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
76 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
77 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
78 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
79 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
80 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
81 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
82 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
83 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
84 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
85 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
86 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
87 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
88 break;
89 case 0x42:
90 device->cname = "NV42";
91 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
92 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
93 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
94 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
95 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
96 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
97 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
98 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
99 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
100 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
101 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
102 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
103 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
104 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
105 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
106 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
107 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
108 break;
109 case 0x43:
110 device->cname = "NV43";
111 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
112 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
113 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
114 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
115 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
116 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
117 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
118 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
119 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
120 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
121 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
122 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
123 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
124 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
125 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
126 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
127 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
128 break;
129 case 0x45:
130 device->cname = "NV45";
131 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
132 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
133 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
134 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
135 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
136 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
137 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
138 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
139 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
140 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
141 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
142 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
143 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
144 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
145 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
146 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
147 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
148 break;
149 case 0x47:
150 device->cname = "G70";
151 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
152 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
153 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
154 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
155 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
156 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
157 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
158 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
159 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
160 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
161 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
162 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
163 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
164 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
165 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
166 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
167 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
168 break;
169 case 0x49:
170 device->cname = "G71";
171 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
172 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
173 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
174 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
175 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
176 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
177 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
178 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
179 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
180 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
181 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
182 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
183 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
184 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
185 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
186 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
187 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
188 break;
189 case 0x4b:
190 device->cname = "G73";
191 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
192 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
193 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
194 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
195 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
196 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
197 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
198 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
199 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
200 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
201 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
202 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
203 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
204 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
205 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
206 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
207 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
208 break;
209 case 0x44:
210 device->cname = "NV44";
211 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
212 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
213 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
214 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
215 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
216 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
217 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
218 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
219 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
220 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
221 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
222 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
223 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
224 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
225 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
226 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
227 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
228 break;
229 case 0x46:
230 device->cname = "G72";
231 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
232 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
233 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
234 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
235 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
236 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
237 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
238 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
239 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
240 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
241 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
242 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
243 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
244 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
245 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
246 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
247 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
248 break;
249 case 0x4a:
250 device->cname = "NV44A";
251 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
252 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
253 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
254 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
255 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
256 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
257 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
258 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
259 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
260 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
261 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
262 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
263 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
264 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
265 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
266 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
267 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
268 break;
269 case 0x4c:
270 device->cname = "C61";
271 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
272 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
273 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
274 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
275 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
276 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
277 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
278 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
279 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
280 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
281 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
282 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
283 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
284 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
285 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
286 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
287 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
288 break;
289 case 0x4e:
290 device->cname = "C51";
291 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
292 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
293 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
294 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
295 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
296 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
297 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
298 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
299 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
300 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
301 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
302 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
303 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
304 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
305 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
306 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
307 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
308 break;
309 case 0x63:
310 device->cname = "C73";
311 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
312 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
313 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
314 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
315 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
316 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
317 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
318 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
319 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
320 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
321 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
322 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
323 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
324 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
325 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
326 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
327 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
328 break;
329 case 0x67:
330 device->cname = "C67";
331 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
332 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
333 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
334 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
335 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
336 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
337 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
338 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
339 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
340 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
341 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
342 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
343 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
344 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
345 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
346 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
347 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
348 break;
349 case 0x68:
350 device->cname = "C68";
351 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
352 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv10_gpio_oclass;
353 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
354 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
355 device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
356 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
357 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
358 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
359 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
360 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
361 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
362 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
363 device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
364 device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
365 device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
366 device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
367 device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
368 break;
369 default:
370 nv_fatal(device, "unknown Curie chipset\n");
371 return -EINVAL;
372 }
373
374 return 0;
375}
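Each identify routine in this series follows the same shape: switch on device->chipset, set device->cname, fill the oclass table for that chipset, and return -EINVAL for anything unrecognised. As a reading aid, here is a minimal standalone model of the family-level dispatch — hypothetical names and types in plain userspace C, not the nouveau API:

/*
 * Illustrative model of per-family identify dispatch; names and
 * types are hypothetical stand-ins, not kernel code.
 */
#include <errno.h>
#include <stdio.h>

struct device_model {
	unsigned int chipset;
	const char *family;
};

static int identify(struct device_model *d)
{
	switch (d->chipset & 0xf0) {
	case 0x40:
	case 0x60:			/* nv40_identify(): Curie */
		d->family = "Curie";
		return 0;
	case 0x50:
	case 0x80:
	case 0x90:
	case 0xa0:			/* nv50_identify(): Tesla */
		d->family = "Tesla";
		return 0;
	case 0xc0:
	case 0xd0:			/* nvc0_identify(): Fermi */
		d->family = "Fermi";
		return 0;
	case 0xe0:			/* nve0_identify(): Kepler */
		d->family = "Kepler";
		return 0;
	default:
		return -EINVAL;		/* unknown chipset */
	}
}

int main(void)
{
	struct device_model d = { .chipset = 0xe4 };

	if (identify(&d) == 0)
		printf("chipset 0x%02x -> %s\n", d.chipset, d.family);
	return 0;
}

The split in the real files matches this keying: 0x4x/0x6x land in nv40_identify() above, 0x50 plus 0x8x/0x9x/0xax in nv50_identify() below, 0xcx/0xdx in nvc0_identify(), and 0xex in nve0_identify().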
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
new file mode 100644
index 000000000000..fec3bcc9a6fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -0,0 +1,410 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/therm.h>
31#include <subdev/mxm.h>
32#include <subdev/devinit.h>
33#include <subdev/mc.h>
34#include <subdev/timer.h>
35#include <subdev/fb.h>
36#include <subdev/instmem.h>
37#include <subdev/vm.h>
38#include <subdev/bar.h>
39
40#include <engine/dmaobj.h>
41#include <engine/fifo.h>
42#include <engine/software.h>
43#include <engine/graph.h>
44#include <engine/mpeg.h>
45#include <engine/vp.h>
46#include <engine/crypt.h>
47#include <engine/bsp.h>
48#include <engine/ppp.h>
49#include <engine/copy.h>
50#include <engine/disp.h>
51
52int
53nv50_identify(struct nouveau_device *device)
54{
55 switch (device->chipset) {
56 case 0x50:
57 device->cname = "G80";
58 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
59 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
60 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
61 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
62 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
63 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
64 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
65 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
66 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
67 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
68 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
69 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
70 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
71 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
72 device->oclass[NVDEV_ENGINE_FIFO ] = &nv50_fifo_oclass;
73 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
74 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
75 device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass;
76 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
77 break;
78 case 0x84:
79 device->cname = "G84";
80 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
81 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
82 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
83 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
84 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
85 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
86 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
87 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
88 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
89 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
90 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
91 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
92 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
93 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
94 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
95 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
96 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
97 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
98 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
99 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
100 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
101 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
102 break;
103 case 0x86:
104 device->cname = "G86";
105 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
106 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
107 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
108 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
109 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
110 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
111 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
112 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
113 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
114 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
115 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
116 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
117 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
118 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
119 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
120 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
121 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
122 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
123 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
124 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
125 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
126 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
127 break;
128 case 0x92:
129 device->cname = "G92";
130 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
131 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
132 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
133 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
134 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
135 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
136 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
137 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
138 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
139 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
140 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
141 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
142 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
143 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
144 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
145 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
146 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
147 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
148 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
149 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
150 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
151 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
152 break;
153 case 0x94:
154 device->cname = "G94";
155 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
156 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
157 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
158 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
159 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
160 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
161 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
162 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
163 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
164 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
165 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
166 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
167 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
168 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
169 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
170 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
171 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
172 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
173 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
174 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
175 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
176 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
177 break;
178 case 0x96:
179 device->cname = "G96";
180 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
181 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
182 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
183 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
184 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
185 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
186 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
187 device->oclass[NVDEV_SUBDEV_MC ] = &nv50_mc_oclass;
188 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
189 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
190 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
191 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
192 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
193 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
194 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
195 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
196 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
197 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
198 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
199 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
200 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
201 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
202 break;
203 case 0x98:
204 device->cname = "G98";
205 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
206 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
207 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
208 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
209 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
210 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
211 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
212 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
213 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
214 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
215 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
216 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
217 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
218 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
219 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
220 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
221 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
222 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
223 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
224 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
225 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
226 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
227 break;
228 case 0xa0:
229 device->cname = "G200";
230 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
231 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
232 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
233 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
234 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
235 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
236 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
237 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
238 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
239 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
240 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
241 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
242 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
243 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
244 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
245 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
246 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
247 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
248 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
249 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
250 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
251 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
252 break;
253 case 0xaa:
254 device->cname = "MCP77/MCP78";
255 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
256 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
257 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
258 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
259 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
260 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
261 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
262 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
263 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
264 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
265 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
266 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
267 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
268 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
269 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
270 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
271 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
272 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
273 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
274 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
275 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
276 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
277 break;
278 case 0xac:
279 device->cname = "MCP79/MCP7A";
280 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
281 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
282 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
283 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv50_clock_oclass;
284 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
285 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
286 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
287 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
288 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
289 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
290 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
291 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
292 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
293 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
294 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
295 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
296 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
297 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
298 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
299 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
300 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
301 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
302 break;
303 case 0xa3:
304 device->cname = "GT215";
305 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
306 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
307 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
308 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
309 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
310 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
311 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
312 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
313 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
314 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
315 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
316 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
317 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
318 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
319 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
320 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
321 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
322 device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
323 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
324 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
325 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
326 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
327 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
328 break;
329 case 0xa5:
330 device->cname = "GT216";
331 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
332 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
333 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
334 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
335 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
336 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
337 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
338 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
339 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
340 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
341 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
342 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
343 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
344 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
345 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
346 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
347 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
348 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
349 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
350 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
351 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
352 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
353 break;
354 case 0xa8:
355 device->cname = "GT218";
356 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
357 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
358 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
359 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
360 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
361 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
362 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
363 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
364 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
365 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
366 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
367 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
368 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
369 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
370 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
371 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
372 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
373 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
374 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
375 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
376 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
377 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
378 break;
379 case 0xaf:
380 device->cname = "MCP89";
381 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
382 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
383 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
384 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
385 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
386 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
387 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
388 device->oclass[NVDEV_SUBDEV_MC ] = &nv98_mc_oclass;
389 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
390 device->oclass[NVDEV_SUBDEV_FB ] = &nv50_fb_oclass;
391 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
392 device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
393 device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
394 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
395 device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
396 device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
397 device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
398 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
399 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
400 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
401 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
402 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
403 break;
404 default:
405 nv_fatal(device, "unknown Tesla chipset\n");
406 return -EINVAL;
407 }
408
409 return 0;
410}
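Note that an identify routine instantiates nothing; it only records which class implements each slot. The device core later walks the table and constructs every populated slot in order, skipping slots a given chipset leaves empty (as nv50_identify() does for NVDEV_ENGINE_MPEG on G98, for instance). A minimal model of that consumption step, with illustrative types rather than the nouveau object model:

/*
 * Minimal model of consuming a slot-indexed class table: every
 * populated slot is constructed in slot order; empty slots are
 * simply skipped.  Illustrative stand-in, not kernel code.
 */
#include <stdio.h>

enum slot { SLOT_VBIOS, SLOT_GPIO, SLOT_I2C, SLOT_NR };

struct oclass_model {
	const char *name;
	int (*ctor)(void);
};

static int bios_ctor(void) { printf("  VBIOS constructed\n"); return 0; }
static int i2c_ctor(void)  { printf("  I2C constructed\n");   return 0; }

int main(void)
{
	static const struct oclass_model bios = { "VBIOS", bios_ctor };
	static const struct oclass_model i2c  = { "I2C",   i2c_ctor };
	const struct oclass_model *table[SLOT_NR] = {
		[SLOT_VBIOS] = &bios,
		[SLOT_I2C]   = &i2c,	/* SLOT_GPIO left empty */
	};
	int i;

	for (i = 0; i < SLOT_NR; i++) {
		if (!table[i])
			continue;	/* slot not fitted on this chipset */
		table[i]->ctor();
	}
	return 0;
}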
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
new file mode 100644
index 000000000000..6697f0f9c293
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -0,0 +1,285 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/therm.h>
31#include <subdev/mxm.h>
32#include <subdev/devinit.h>
33#include <subdev/mc.h>
34#include <subdev/timer.h>
35#include <subdev/fb.h>
36#include <subdev/ltcg.h>
37#include <subdev/ibus.h>
38#include <subdev/instmem.h>
39#include <subdev/vm.h>
40#include <subdev/bar.h>
41
42#include <engine/dmaobj.h>
43#include <engine/fifo.h>
44#include <engine/software.h>
45#include <engine/graph.h>
46#include <engine/vp.h>
47#include <engine/bsp.h>
48#include <engine/ppp.h>
49#include <engine/copy.h>
50#include <engine/disp.h>
51
52int
53nvc0_identify(struct nouveau_device *device)
54{
55 switch (device->chipset) {
56 case 0xc0:
57 device->cname = "GF100";
58 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
59 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
60 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
61 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
62 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
63 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
64 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
65 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
66 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
67 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
68 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
69 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
70 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
71 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
72 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
73 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
74 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
75 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
76 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
77 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
78 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
79 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
80 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
81 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
82 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
83 break;
84 case 0xc4:
85 device->cname = "GF104";
86 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
87 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
88 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
89 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
90 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
91 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
92 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
93 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
94 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
95 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
96 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
97 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
98 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
99 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
100 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
101 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
102 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
103 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
104 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
105 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
106 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
107 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
108 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
109 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
110 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
111 break;
112 case 0xc3:
113 device->cname = "GF106";
114 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
115 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
116 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
117 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
118 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
119 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
120 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
121 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
122 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
123 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
124 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
125 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
126 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
127 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
128 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
129 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
130 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
131 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
132 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
133 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
134 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
135 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
136 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
137 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
138 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
139 break;
140 case 0xce:
141 device->cname = "GF114";
142 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
143 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
144 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
145 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
146 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
147 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
148 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
149 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
150 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
151 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
152 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
153 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
154 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
155 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
156 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
157 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
158 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
159 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
160 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
161 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
162 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
163 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
164 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
165 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
166 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
167 break;
168 case 0xcf:
169 device->cname = "GF116";
170 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
171 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
172 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
173 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
174 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
175 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
176 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
177 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
178 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
179 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
180 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
181 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
182 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
183 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
184 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
185 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
186 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
187 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
188 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
189 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
190 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
191 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
192 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
193 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
194 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
195 break;
196 case 0xc1:
197 device->cname = "GF108";
198 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
199 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
200 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
201 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
202 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
203 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
204 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
205 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
206 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
207 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
208 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
209 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
210 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
211 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
212 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
213 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
214 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
215 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
216 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
217 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
218 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
219 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
220 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
221 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
222 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
223 break;
224 case 0xc8:
225 device->cname = "GF110";
226 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
227 device->oclass[NVDEV_SUBDEV_GPIO ] = &nv50_gpio_oclass;
228 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
229 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
230 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
231 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
232 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
233 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
234 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
235 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
236 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
237 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
238 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
239 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
240 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
241 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
242 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
243 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
244 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
245 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
246 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
247 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
248 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
249 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
250 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
251 break;
252 case 0xd9:
253 device->cname = "GF119";
254 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
255 device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
256 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
257 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
258 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
259 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
260 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
261 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
262 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
263 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
264 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
265 device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
266 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
267 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
268 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
269 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
270 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
271 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
272 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
273 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
274 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
275 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
276 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
277 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
278 break;
279 default:
280 nv_fatal(device, "unknown Fermi chipset\n");
281 return -EINVAL;
282 }
283
284 return 0;
285}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
new file mode 100644
index 000000000000..4a280b7ab853
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -0,0 +1,109 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/device.h>
26#include <subdev/bios.h>
27#include <subdev/gpio.h>
28#include <subdev/i2c.h>
29#include <subdev/clock.h>
30#include <subdev/therm.h>
31#include <subdev/mxm.h>
32#include <subdev/devinit.h>
33#include <subdev/mc.h>
34#include <subdev/timer.h>
35#include <subdev/fb.h>
36#include <subdev/ltcg.h>
37#include <subdev/ibus.h>
38#include <subdev/instmem.h>
39#include <subdev/vm.h>
40#include <subdev/bar.h>
41
42#include <engine/dmaobj.h>
43#include <engine/fifo.h>
44#include <engine/software.h>
45#include <engine/graph.h>
46#include <engine/disp.h>
47#include <engine/copy.h>
48
49int
50nve0_identify(struct nouveau_device *device)
51{
52 switch (device->chipset) {
53 case 0xe4:
54 device->cname = "GK104";
55 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
56 device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
57 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
58 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
59 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
60 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
61 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
62 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
63 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
64 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
65 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
66 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
67 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
68 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
69 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
70 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
71 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
72 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
73 device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
74 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
75 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
76 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
77 break;
78 case 0xe7:
79 device->cname = "GK107";
80 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
81 device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
82 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
83 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
84 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
85 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
86 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
87 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
88 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
89 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
90 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
91 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
92 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
93 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
94 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
95 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
96 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
97 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
98 device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
99 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
100 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
101 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
102 break;
103 default:
104 nv_fatal(device, "unknown Kepler chipset\n");
105 return -EINVAL;
106 }
107
108 return 0;
109}
diff --git a/drivers/gpu/drm/nouveau/nv98_ppp.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
index 1847963e27f3..5a07a39c1735 100644
--- a/drivers/gpu/drm/nouveau/nv98_ppp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,57 +22,48 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-
-struct nv98_ppp_engine {
-	struct nouveau_exec_engine base;
-};
-
-static int
-nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	if (!(nv_rd32(dev, 0x000200) & 0x00000002))
-		return 0;
-
-	nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
-	return 0;
-}
-
-static int
-nv98_ppp_init(struct drm_device *dev, int engine)
-{
-	nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
-	return 0;
-}
-
-static void
-nv98_ppp_destroy(struct drm_device *dev, int engine)
-{
-	struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
-
-	NVOBJ_ENGINE_DEL(dev, PPP);
-
-	kfree(pppp);
-}
+#include <core/option.h>
+
+#include <subdev/devinit.h>
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+
+int
+nouveau_devinit_init(struct nouveau_devinit *devinit)
+{
+	int ret = nouveau_subdev_init(&devinit->base);
+	if (ret)
+		return ret;
+
+	return nvbios_init(&devinit->base, devinit->post);
+}
+
+int
+nouveau_devinit_fini(struct nouveau_devinit *devinit, bool suspend)
+{
+	/* force full reinit on resume */
+	if (suspend)
+		devinit->post = true;
+
+	return nouveau_subdev_fini(&devinit->base, suspend);
+}
 
 int
-nv98_ppp_create(struct drm_device *dev)
+nouveau_devinit_create_(struct nouveau_object *parent,
+			struct nouveau_object *engine,
+			struct nouveau_oclass *oclass,
+			int size, void **pobject)
 {
-	struct nv98_ppp_engine *pppp;
-
-	pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
-	if (!pppp)
-		return -ENOMEM;
-
-	pppp->base.destroy = nv98_ppp_destroy;
-	pppp->base.init = nv98_ppp_init;
-	pppp->base.fini = nv98_ppp_fini;
-
-	NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
+	struct nouveau_device *device = nv_device(parent);
+	struct nouveau_devinit *devinit;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "DEVINIT",
+				     "init", size, pobject);
+	devinit = *pobject;
+	if (ret)
+		return ret;
+
+	devinit->post = nouveau_boolopt(device->cfgopt, "NvForcePost", false);
 	return 0;
 }
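The behavioural detail worth noting in the new devinit base is the post flag: it is seeded from the NvForcePost config option at create time, consumed by nvbios_init() at init time, and forced back on whenever fini() runs for suspend, so resume always re-executes the full VBIOS init scripts. A compilable model of that lifecycle, in plain C and illustrative only:

/*
 * Model of the post-on-resume logic above: fini(suspend=true) arms
 * 'post' so the next init() runs the full init scripts.  Not kernel
 * code; the flag handling is the point.
 */
#include <stdbool.h>
#include <stdio.h>

struct devinit_model { bool post; };

static int devinit_init(struct devinit_model *d)
{
	printf("init: post=%s\n",
	       d->post ? "yes (run init scripts)" : "no (firmware posted)");
	return 0;
}

static int devinit_fini(struct devinit_model *d, bool suspend)
{
	if (suspend)
		d->post = true;	/* force full reinit on resume */
	return 0;
}

int main(void)
{
	struct devinit_model d = { .post = false };

	devinit_init(&d);		/* boot */
	devinit_fini(&d, true);		/* suspend */
	devinit_init(&d);		/* resume: post now forced */
	return 0;
}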
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
new file mode 100644
index 000000000000..6b56a0f4cb40
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/fbmem.h
@@ -0,0 +1,98 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#define NV04_PFB_BOOT_0 0x00100000
28# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003
29# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000
30# define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB 0x00000001
31# define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB 0x00000002
32# define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB 0x00000003
33# define NV04_PFB_BOOT_0_RAM_WIDTH_128 0x00000004
34# define NV04_PFB_BOOT_0_RAM_TYPE 0x00000028
35# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT 0x00000000
36# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT 0x00000008
37# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK 0x00000010
38# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT 0x00000018
39# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT 0x00000020
40# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16 0x00000028
41# define NV04_PFB_BOOT_0_UMA_ENABLE 0x00000100
42# define NV04_PFB_BOOT_0_UMA_SIZE 0x0000f000
43#define NV04_PFB_DEBUG_0 0x00100080
44# define NV04_PFB_DEBUG_0_PAGE_MODE 0x00000001
45# define NV04_PFB_DEBUG_0_REFRESH_OFF 0x00000010
46# define NV04_PFB_DEBUG_0_REFRESH_COUNTX64 0x00003f00
47# define NV04_PFB_DEBUG_0_REFRESH_SLOW_CLK 0x00004000
48# define NV04_PFB_DEBUG_0_SAFE_MODE 0x00008000
49# define NV04_PFB_DEBUG_0_ALOM_ENABLE 0x00010000
50# define NV04_PFB_DEBUG_0_CASOE 0x00100000
51# define NV04_PFB_DEBUG_0_CKE_INVERT 0x10000000
52# define NV04_PFB_DEBUG_0_REFINC 0x20000000
53# define NV04_PFB_DEBUG_0_SAVE_POWER_OFF 0x40000000
54#define NV04_PFB_CFG0 0x00100200
55# define NV04_PFB_CFG0_SCRAMBLE 0x20000000
56#define NV04_PFB_CFG1 0x00100204
57#define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i))
58
59#define NV10_PFB_REFCTRL 0x00100210
60# define NV10_PFB_REFCTRL_VALID_1 (1 << 31)
61
62static inline struct io_mapping *
63fbmem_init(struct pci_dev *pdev)
64{
65 return io_mapping_create_wc(pci_resource_start(pdev, 1),
66 pci_resource_len(pdev, 1));
67}
68
69static inline void
70fbmem_fini(struct io_mapping *fb)
71{
72 io_mapping_free(fb);
73}
74
75static inline u32
76fbmem_peek(struct io_mapping *fb, u32 off)
77{
78 u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
79 u32 val = ioread32(p + (off & ~PAGE_MASK));
80 io_mapping_unmap_atomic(p);
81 return val;
82}
83
84static inline void
85fbmem_poke(struct io_mapping *fb, u32 off, u32 val)
86{
87 u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
88 iowrite32(val, p + (off & ~PAGE_MASK));
89 wmb();
90 io_mapping_unmap_atomic(p);
91}
92
93static inline bool
94fbmem_readback(struct io_mapping *fb, u32 off, u32 val)
95{
96 fbmem_poke(fb, off, val);
97 return val == fbmem_peek(fb, off);
98}
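
The helpers above never map the whole BAR1 aperture; each access atomically maps one write-combined page and indexes into it. Assuming 4 KiB pages, the offset arithmetic splits as in this standalone sketch (not kernel code):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(PAGE_SIZE - 1))    /* assumes 4 KiB pages */

int main(void)
{
        uint32_t off  = 0x00401c04;             /* arbitrary aperture offset */
        uint32_t page = off & PAGE_MASK;        /* base handed to the mapper */
        uint32_t rest = off & ~PAGE_MASK;       /* added to the mapped pointer */
        printf("page base 0x%08x, offset in page 0x%03x\n", page, rest);
        return 0;
}

fbmem_readback() then builds the probe primitive the meminit routines below rely on: write a word, read it back, and report whether it stuck.
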
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
new file mode 100644
index 000000000000..7a72d9394340
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -0,0 +1,189 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/devinit.h>
28#include <subdev/vga.h>
29
30#include "fbmem.h"
31
32struct nv04_devinit_priv {
33 struct nouveau_devinit base;
34 int owner;
35};
36
37static void
38nv04_devinit_meminit(struct nouveau_devinit *devinit)
39{
40 struct nv04_devinit_priv *priv = (void *)devinit;
41 u32 patt = 0xdeadbeef;
42 struct io_mapping *fb;
43 int i;
44
45 /* Map the framebuffer aperture */
46 fb = fbmem_init(nv_device(priv)->pdev);
47 if (!fb) {
48 nv_error(priv, "failed to map fb\n");
49 return;
50 }
51
52 /* Sequencer and refresh off */
53 nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
54 nv_mask(priv, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
55
56 nv_mask(priv, NV04_PFB_BOOT_0, ~0,
57 NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
58 NV04_PFB_BOOT_0_RAM_WIDTH_128 |
59 NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);
60
61 for (i = 0; i < 4; i++)
62 fbmem_poke(fb, 4 * i, patt);
63
64 fbmem_poke(fb, 0x400000, patt + 1);
65
66 if (fbmem_peek(fb, 0) == patt + 1) {
67 nv_mask(priv, NV04_PFB_BOOT_0,
68 NV04_PFB_BOOT_0_RAM_TYPE,
69 NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
70 nv_mask(priv, NV04_PFB_DEBUG_0,
71 NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
72
73 for (i = 0; i < 4; i++)
74 fbmem_poke(fb, 4 * i, patt);
75
76 if ((fbmem_peek(fb, 0xc) & 0xffff) != (patt & 0xffff))
77 nv_mask(priv, NV04_PFB_BOOT_0,
78 NV04_PFB_BOOT_0_RAM_WIDTH_128 |
79 NV04_PFB_BOOT_0_RAM_AMOUNT,
80 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
81 } else
82 if ((fbmem_peek(fb, 0xc) & 0xffff0000) != (patt & 0xffff0000)) {
83 nv_mask(priv, NV04_PFB_BOOT_0,
84 NV04_PFB_BOOT_0_RAM_WIDTH_128 |
85 NV04_PFB_BOOT_0_RAM_AMOUNT,
86 NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
87 } else
88 if (fbmem_peek(fb, 0) != patt) {
89 if (fbmem_readback(fb, 0x800000, patt))
90 nv_mask(priv, NV04_PFB_BOOT_0,
91 NV04_PFB_BOOT_0_RAM_AMOUNT,
92 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
93 else
94 nv_mask(priv, NV04_PFB_BOOT_0,
95 NV04_PFB_BOOT_0_RAM_AMOUNT,
96 NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
97
98 nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
99 NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
100 } else
101 if (!fbmem_readback(fb, 0x800000, patt)) {
102 nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
103 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
104
105 }
106
107 /* Refresh on, sequencer on */
108 nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
109 nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
110 fbmem_fini(fb);
111}
112
113static int
114nv04_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
115 struct nouveau_oclass *oclass, void *data, u32 size,
116 struct nouveau_object **pobject)
117{
118 struct nv04_devinit_priv *priv;
119 int ret;
120
121 ret = nouveau_devinit_create(parent, engine, oclass, &priv);
122 *pobject = nv_object(priv);
123 if (ret)
124 return ret;
125
126 priv->base.meminit = nv04_devinit_meminit;
127 priv->owner = -1;
128 return 0;
129}
130
131void
132nv04_devinit_dtor(struct nouveau_object *object)
133{
134 struct nv04_devinit_priv *priv = (void *)object;
135
136 /* restore vga owner saved at first init, and lock crtc regs */
137 nv_wrvgaowner(priv, priv->owner);
138 nv_lockvgac(priv, true);
139
140 nouveau_devinit_destroy(&priv->base);
141}
142
143int
144nv04_devinit_init(struct nouveau_object *object)
145{
146 struct nv04_devinit_priv *priv = (void *)object;
147
148 if (!priv->base.post) {
149 u32 htotal = nv_rdvgac(priv, 0, 0x06);
150 htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x01) << 8;
151 htotal |= (nv_rdvgac(priv, 0, 0x07) & 0x20) << 4;
152 htotal |= (nv_rdvgac(priv, 0, 0x25) & 0x01) << 10;
153 htotal |= (nv_rdvgac(priv, 0, 0x41) & 0x01) << 11;
154 if (!htotal) {
155 nv_info(priv, "adaptor not initialised\n");
156 priv->base.post = true;
157 }
158 }
159
160 return nouveau_devinit_init(&priv->base);
161}
162
163int
164nv04_devinit_fini(struct nouveau_object *object, bool suspend)
165{
166 struct nv04_devinit_priv *priv = (void *)object;
167
168 /* make i2c busses accessible */
169 nv_mask(priv, 0x000200, 0x00000001, 0x00000001);
170
171 /* unlock extended vga crtc regs, and unslave crtcs */
172 nv_lockvgac(priv, false);
173 if (priv->owner < 0)
174 priv->owner = nv_rdvgaowner(priv);
175 nv_wrvgaowner(priv, 0);
176
177 return nouveau_devinit_fini(&priv->base, suspend);
178}
179
180struct nouveau_oclass
181nv04_devinit_oclass = {
182 .handle = NV_SUBDEV(DEVINIT, 0x04),
183 .ofuncs = &(struct nouveau_ofuncs) {
184 .ctor = nv04_devinit_ctor,
185 .dtor = nv04_devinit_dtor,
186 .init = nv04_devinit_init,
187 .fini = nv04_devinit_fini,
188 },
189};
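
The nv04 probe above is an aliasing test: the controller is first programmed for the largest supported configuration (16 MiB, 128-bit, SGRAM), then a pattern is written past the end of smaller configurations; if the write wraps around and lands back at offset 0, less memory (or a narrower bus) is fitted and BOOT_0 is dialled down. A toy model of the wrap test, with a modulo standing in for the address aliasing of real, smaller VRAM:

#include <stdio.h>
#include <stdint.h>

#define FITTED_VRAM (4u << 20)  /* pretend only 4 MiB is fitted */

static uint32_t vram[FITTED_VRAM / 4];

static void poke(uint32_t off, uint32_t v) { vram[(off % FITTED_VRAM) / 4] = v; }
static uint32_t peek(uint32_t off) { return vram[(off % FITTED_VRAM) / 4]; }

int main(void)
{
        uint32_t patt = 0xdeadbeef;
        poke(0x000000, patt);
        poke(0x400000, patt + 1);       /* wraps: overwrites offset 0 */
        if (peek(0) == patt + 1)
                printf("write at 4 MiB aliased to 0: RAM_AMOUNT must shrink\n");
        return 0;
}
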
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
new file mode 100644
index 000000000000..191447d0d252
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
@@ -0,0 +1,159 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/devinit.h>
28#include <subdev/bios.h>
29#include <subdev/bios/bmp.h>
30#include <subdev/vga.h>
31
32#include "fbmem.h"
33
34struct nv05_devinit_priv {
35 struct nouveau_devinit base;
36 u8 owner;
37};
38
39static void
40nv05_devinit_meminit(struct nouveau_devinit *devinit)
41{
42 static const u8 default_config_tab[][2] = {
43 { 0x24, 0x00 },
44 { 0x28, 0x00 },
45 { 0x24, 0x01 },
46 { 0x1f, 0x00 },
47 { 0x0f, 0x00 },
48 { 0x17, 0x00 },
49 { 0x06, 0x00 },
50 { 0x00, 0x00 }
51 };
52 struct nv05_devinit_priv *priv = (void *)devinit;
53 struct nouveau_bios *bios = nouveau_bios(priv);
54 struct io_mapping *fb;
55 u32 patt = 0xdeadbeef;
56 u16 data;
57 u8 strap, ramcfg[2];
58 int i, v;
59
60 /* Map the framebuffer aperture */
61 fb = fbmem_init(nv_device(priv)->pdev);
62 if (!fb) {
63 nv_error(priv, "failed to map fb\n");
64 return;
65 }
66
67 strap = (nv_rd32(priv, 0x101000) & 0x0000003c) >> 2;
68 if ((data = bmp_mem_init_table(bios))) {
69 ramcfg[0] = nv_ro08(bios, data + 2 * strap + 0);
70 ramcfg[1] = nv_ro08(bios, data + 2 * strap + 1);
71 } else {
72 ramcfg[0] = default_config_tab[strap][0];
73 ramcfg[1] = default_config_tab[strap][1];
74 }
75
76 /* Sequencer off */
77 nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) | 0x20);
78
79 if (nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
80 goto out;
81
82 nv_mask(priv, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
83
84 /* If present load the hardcoded scrambling table */
85 if (data) {
86 for (i = 0, data += 0x10; i < 8; i++, data += 4) {
87 u32 scramble = nv_ro32(bios, data);
88 nv_wr32(priv, NV04_PFB_SCRAMBLE(i), scramble);
89 }
90 }
91
92 /* Set memory type/width/length defaults depending on the straps */
93 nv_mask(priv, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
94
95 if (ramcfg[1] & 0x80)
96 nv_mask(priv, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
97
98 nv_mask(priv, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
99 nv_mask(priv, NV04_PFB_CFG1, 0, 1);
100
101 /* Probe memory bus width */
102 for (i = 0; i < 4; i++)
103 fbmem_poke(fb, 4 * i, patt);
104
105 if (fbmem_peek(fb, 0xc) != patt)
106 nv_mask(priv, NV04_PFB_BOOT_0,
107 NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);
108
109 /* Probe memory length */
110 v = nv_rd32(priv, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
111
112 if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
113 (!fbmem_readback(fb, 0x1000000, ++patt) ||
114 !fbmem_readback(fb, 0, ++patt)))
115 nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
116 NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);
117
118 if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
119 !fbmem_readback(fb, 0x800000, ++patt))
120 nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
121 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
122
123 if (!fbmem_readback(fb, 0x400000, ++patt))
124 nv_mask(priv, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
125 NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
126
127out:
128 /* Sequencer on */
129 nv_wrvgas(priv, 0, 1, nv_rdvgas(priv, 0, 1) & ~0x20);
130 fbmem_fini(fb);
131}
132
133static int
134nv05_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
135 struct nouveau_oclass *oclass, void *data, u32 size,
136 struct nouveau_object **pobject)
137{
138 struct nv05_devinit_priv *priv;
139 int ret;
140
141 ret = nouveau_devinit_create(parent, engine, oclass, &priv);
142 *pobject = nv_object(priv);
143 if (ret)
144 return ret;
145
146 priv->base.meminit = nv05_devinit_meminit;
147 return 0;
148}
149
150struct nouveau_oclass
151nv05_devinit_oclass = {
152 .handle = NV_SUBDEV(DEVINIT, 0x05),
153 .ofuncs = &(struct nouveau_ofuncs) {
154 .ctor = nv05_devinit_ctor,
155 .dtor = nv04_devinit_dtor,
156 .init = nv04_devinit_init,
157 .fini = nv04_devinit_fini,
158 },
159};
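
nv05 keys its configuration off a 4-bit strap read from bits 5:2 of register 0x101000, used as an index (two bytes per entry) into either the BMP memory-init table or the built-in fallback above. A sketch of just the decode, with a made-up register value (note the fallback table only covers straps 0-7):

#include <stdio.h>
#include <stdint.h>

static const uint8_t default_config_tab[][2] = {
        { 0x24, 0x00 }, { 0x28, 0x00 }, { 0x24, 0x01 }, { 0x1f, 0x00 },
        { 0x0f, 0x00 }, { 0x17, 0x00 }, { 0x06, 0x00 }, { 0x00, 0x00 },
};

int main(void)
{
        uint32_t reg = 0x00000008;              /* made-up straps register value */
        uint8_t strap = (reg & 0x3c) >> 2;      /* -> 2 */
        printf("strap %u -> ramcfg %02x %02x\n", strap,
               default_config_tab[strap][0], default_config_tab[strap][1]);
        return 0;
}
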
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
new file mode 100644
index 000000000000..eb76ffab6b0c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -0,0 +1,124 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/devinit.h>
28#include <subdev/vga.h>
29
30#include "fbmem.h"
31
32struct nv10_devinit_priv {
33 struct nouveau_devinit base;
34 u8 owner;
35};
36
37static void
38nv10_devinit_meminit(struct nouveau_devinit *devinit)
39{
40 struct nv10_devinit_priv *priv = (void *)devinit;
41 const int mem_width[] = { 0x10, 0x00, 0x20 };
42 const int mem_width_count = nv_device(priv)->chipset >= 0x17 ? 3 : 2;
43 uint32_t patt = 0xdeadbeef;
44 struct io_mapping *fb;
45 int i, j, k;
46
47 /* Map the framebuffer aperture */
48 fb = fbmem_init(nv_device(priv)->pdev);
49 if (!fb) {
50 nv_error(priv, "failed to map fb\n");
51 return;
52 }
53
54 nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
55
56 /* Probe memory bus width */
57 for (i = 0; i < mem_width_count; i++) {
58 nv_mask(priv, NV04_PFB_CFG0, 0x30, mem_width[i]);
59
60 for (j = 0; j < 4; j++) {
61 for (k = 0; k < 4; k++)
62 fbmem_poke(fb, 0x1c, 0);
63
64 fbmem_poke(fb, 0x1c, patt);
65 fbmem_poke(fb, 0x3c, 0);
66
67 if (fbmem_peek(fb, 0x1c) == patt)
68 goto mem_width_found;
69 }
70 }
71
72mem_width_found:
73 patt <<= 1;
74
75 /* Probe amount of installed memory */
76 for (i = 0; i < 4; i++) {
77 int off = nv_rd32(priv, 0x10020c) - 0x100000;
78
79 fbmem_poke(fb, off, patt);
80 fbmem_poke(fb, 0, 0);
81
82 fbmem_peek(fb, 0);
83 fbmem_peek(fb, 0);
84 fbmem_peek(fb, 0);
85 fbmem_peek(fb, 0);
86
87 if (fbmem_peek(fb, off) == patt)
88 goto amount_found;
89 }
90
91 /* IC missing - disable the upper half memory space. */
92 nv_mask(priv, NV04_PFB_CFG0, 0x1000, 0);
93
94amount_found:
95 fbmem_fini(fb);
96}
97
98static int
99nv10_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
100 struct nouveau_oclass *oclass, void *data, u32 size,
101 struct nouveau_object **pobject)
102{
103 struct nv10_devinit_priv *priv;
104 int ret;
105
106 ret = nouveau_devinit_create(parent, engine, oclass, &priv);
107 *pobject = nv_object(priv);
108 if (ret)
109 return ret;
110
111 priv->base.meminit = nv10_devinit_meminit;
112 return 0;
113}
114
115struct nouveau_oclass
116nv10_devinit_oclass = {
117 .handle = NV_SUBDEV(DEVINIT, 0x10),
118 .ofuncs = &(struct nouveau_ofuncs) {
119 .ctor = nv10_devinit_ctor,
120 .dtor = nv04_devinit_dtor,
121 .init = nv04_devinit_init,
122 .fini = nv04_devinit_fini,
123 },
124};
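
The nv10 width probe simply tries each CFG0 setting in turn and keeps the first one under which a pattern written at 0x1c survives a disturbing write at 0x3c. Compressed into a sketch, with probe_width() a hypothetical stand-in for the fbmem poke/peek sequence:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical: pretend only the 0x20 setting reads back intact. */
static bool probe_width(int setting) { return setting == 0x20; }

int main(void)
{
        const int mem_width[] = { 0x10, 0x00, 0x20 };
        for (int i = 0; i < 3; i++) {
                if (probe_width(mem_width[i])) {
                        printf("CFG0 width bits 0x%02x work\n", mem_width[i]);
                        break;
                }
        }
        return 0;
}
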
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
index 326bf5e2035a..5b2ba630d913 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -18,42 +18,41 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
  */
 
-#ifndef __NOUVEAU_I2C_H__
-#define __NOUVEAU_I2C_H__
-
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <drm/drm_dp_helper.h>
-
-#define NV_I2C_PORT(n)    (0x00 + (n))
-#define NV_I2C_PORT_NUM   0x10
-#define NV_I2C_DEFAULT(n) (0x80 + (n))
+#include <subdev/devinit.h>
+#include <subdev/vga.h>
 
-struct nouveau_i2c_chan {
-	struct i2c_adapter adapter;
-	struct drm_device *dev;
-	struct i2c_algo_bit_data bit;
-	struct list_head head;
-	u8  index;
-	u8  type;
-	u32 dcb;
-	u32 drive;
-	u32 sense;
-	u32 state;
+struct nv1a_devinit_priv {
+	struct nouveau_devinit base;
+	u8 owner;
 };
 
-int nouveau_i2c_init(struct drm_device *);
-void nouveau_i2c_fini(struct drm_device *);
-struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, u8 index);
-bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
-int nouveau_i2c_identify(struct drm_device *dev, const char *what,
-			 struct i2c_board_info *info,
-			 bool (*match)(struct nouveau_i2c_chan *,
-				       struct i2c_board_info *),
-			 int index);
+static int
+nv1a_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		  struct nouveau_oclass *oclass, void *data, u32 size,
+		  struct nouveau_object **pobject)
+{
+	struct nv1a_devinit_priv *priv;
+	int ret;
 
-extern const struct i2c_algorithm nouveau_dp_i2c_algo;
+	ret = nouveau_devinit_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
 
-#endif /* __NOUVEAU_I2C_H__ */
+	return 0;
+}
+
+struct nouveau_oclass
+nv1a_devinit_oclass = {
+	.handle = NV_SUBDEV(DEVINIT, 0x1a),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv1a_devinit_ctor,
+		.dtor = nv04_devinit_dtor,
+		.init = nv04_devinit_init,
+		.fini = nv04_devinit_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
new file mode 100644
index 000000000000..eb32e99005e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/devinit.h>
28#include <subdev/vga.h>
29
30#include "fbmem.h"
31
32struct nv20_devinit_priv {
33 struct nouveau_devinit base;
34 u8 owner;
35};
36
37static void
38nv20_devinit_meminit(struct nouveau_devinit *devinit)
39{
40 struct nv20_devinit_priv *priv = (void *)devinit;
41 struct nouveau_device *device = nv_device(priv);
42 uint32_t mask = (device->chipset >= 0x25 ? 0x300 : 0x900);
43 uint32_t amount, off;
44 struct io_mapping *fb;
45
46 /* Map the framebuffer aperture */
47 fb = fbmem_init(nv_device(priv)->pdev);
48 if (!fb) {
49 nv_error(priv, "failed to map fb\n");
50 return;
51 }
52
53 nv_wr32(priv, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
54
55 /* Allow full addressing */
56 nv_mask(priv, NV04_PFB_CFG0, 0, mask);
57
58 amount = nv_rd32(priv, 0x10020c);
59 for (off = amount; off > 0x2000000; off -= 0x2000000)
60 fbmem_poke(fb, off - 4, off);
61
62 amount = nv_rd32(priv, 0x10020c);
63 if (amount != fbmem_peek(fb, amount - 4))
64 /* IC missing - disable the upper half memory space. */
65 nv_mask(priv, NV04_PFB_CFG0, mask, 0);
66
67 fbmem_fini(fb);
68}
69
70static int
71nv20_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
72 struct nouveau_oclass *oclass, void *data, u32 size,
73 struct nouveau_object **pobject)
74{
75 struct nv20_devinit_priv *priv;
76 int ret;
77
78 ret = nouveau_devinit_create(parent, engine, oclass, &priv);
79 *pobject = nv_object(priv);
80 if (ret)
81 return ret;
82
83 priv->base.meminit = nv20_devinit_meminit;
84 return 0;
85}
86
87struct nouveau_oclass
88nv20_devinit_oclass = {
89 .handle = NV_SUBDEV(DEVINIT, 0x20),
90 .ofuncs = &(struct nouveau_ofuncs) {
91 .ctor = nv20_devinit_ctor,
92 .dtor = nv04_devinit_dtor,
93 .init = nv04_devinit_init,
94 .fini = nv04_devinit_fini,
95 },
96};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
new file mode 100644
index 000000000000..61becfa732e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -0,0 +1,87 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/devinit.h>
26#include <subdev/vga.h>
27
28struct nv50_devinit_priv {
29 struct nouveau_devinit base;
30};
31
32static int
33nv50_devinit_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
34 struct nouveau_oclass *oclass, void *data, u32 size,
35 struct nouveau_object **pobject)
36{
37 struct nv50_devinit_priv *priv;
38 int ret;
39
40 ret = nouveau_devinit_create(parent, engine, oclass, &priv);
41 *pobject = nv_object(priv);
42 if (ret)
43 return ret;
44
45 return 0;
46}
47
48static void
49nv50_devinit_dtor(struct nouveau_object *object)
50{
51 struct nv50_devinit_priv *priv = (void *)object;
52 nouveau_devinit_destroy(&priv->base);
53}
54
55static int
56nv50_devinit_init(struct nouveau_object *object)
57{
58 struct nv50_devinit_priv *priv = (void *)object;
59
60 if (!priv->base.post) {
61 if (!nv_rdvgac(priv, 0, 0x00) &&
62 !nv_rdvgac(priv, 0, 0x1a)) {
63 nv_info(priv, "adaptor not initialised\n");
64 priv->base.post = true;
65 }
66 }
67
68 return nouveau_devinit_init(&priv->base);
69}
70
71static int
72nv50_devinit_fini(struct nouveau_object *object, bool suspend)
73{
74 struct nv50_devinit_priv *priv = (void *)object;
75 return nouveau_devinit_fini(&priv->base, suspend);
76}
77
78struct nouveau_oclass
79nv50_devinit_oclass = {
80 .handle = NV_SUBDEV(DEVINIT, 0x50),
81 .ofuncs = &(struct nouveau_ofuncs) {
82 .ctor = nv50_devinit_ctor,
83 .dtor = nv50_devinit_dtor,
84 .init = nv50_devinit_init,
85 .fini = nv50_devinit_fini,
86 },
87};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
new file mode 100644
index 000000000000..f0086de8af31
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -0,0 +1,130 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "subdev/fb.h"
26#include "subdev/bios.h"
27#include "subdev/bios/bit.h"
28
29int
30nouveau_fb_bios_memtype(struct nouveau_bios *bios)
31{
32 struct bit_entry M;
33 u8 ramcfg;
34
35 ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
36 if (!bit_entry(bios, 'M', &M) && M.version == 2 && M.length >= 5) {
37 u16 table = nv_ro16(bios, M.offset + 3);
38 u8 version = nv_ro08(bios, table + 0);
39 u8 header = nv_ro08(bios, table + 1);
40 u8 record = nv_ro08(bios, table + 2);
41 u8 entries = nv_ro08(bios, table + 3);
42 if (table && version == 0x10 && ramcfg < entries) {
43 u16 entry = table + header + (ramcfg * record);
44 switch (nv_ro08(bios, entry) & 0x0f) {
45 case 0: return NV_MEM_TYPE_DDR2;
46 case 1: return NV_MEM_TYPE_DDR3;
47 case 2: return NV_MEM_TYPE_GDDR3;
48 case 3: return NV_MEM_TYPE_GDDR5;
49 default:
50 break;
51 }
52
53 }
54 }
55
56 return NV_MEM_TYPE_UNKNOWN;
57}
58
59int
60nouveau_fb_init(struct nouveau_fb *pfb)
61{
62 int ret, i;
63
64 ret = nouveau_subdev_init(&pfb->base);
65 if (ret)
66 return ret;
67
68 for (i = 0; i < pfb->tile.regions; i++)
69 pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
70
71 return 0;
72}
73
74int
75_nouveau_fb_init(struct nouveau_object *object)
76{
77 struct nouveau_fb *pfb = (void *)object;
78 return nouveau_fb_init(pfb);
79}
80
81void
82nouveau_fb_destroy(struct nouveau_fb *pfb)
83{
84 int i;
85
86 for (i = 0; i < pfb->tile.regions; i++)
87 pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
88
89 if (pfb->tags.block_size)
90 nouveau_mm_fini(&pfb->tags);
91
92 if (pfb->vram.block_size)
93 nouveau_mm_fini(&pfb->vram);
94
95 nouveau_subdev_destroy(&pfb->base);
96}
97
98void
99_nouveau_fb_dtor(struct nouveau_object *object)
100{
101 struct nouveau_fb *pfb = (void *)object;
102 nouveau_fb_destroy(pfb);
103}
104
105int
106nouveau_fb_created(struct nouveau_fb *pfb)
107{
108 static const char *name[] = {
109 [NV_MEM_TYPE_UNKNOWN] = "unknown",
110 [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
111 [NV_MEM_TYPE_SGRAM ] = "SGRAM",
112 [NV_MEM_TYPE_SDRAM ] = "SDRAM",
113 [NV_MEM_TYPE_DDR1 ] = "DDR1",
114 [NV_MEM_TYPE_DDR2 ] = "DDR2",
115 [NV_MEM_TYPE_DDR3 ] = "DDR3",
116 [NV_MEM_TYPE_GDDR2 ] = "GDDR2",
117 [NV_MEM_TYPE_GDDR3 ] = "GDDR3",
118 [NV_MEM_TYPE_GDDR4 ] = "GDDR4",
119 [NV_MEM_TYPE_GDDR5 ] = "GDDR5",
120 };
121
122 if (pfb->ram.size == 0) {
123 nv_fatal(pfb, "no vram detected!!\n");
124 return -ERANGE;
125 }
126
127 nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
128 nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
129 return 0;
130}
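
nouveau_fb_bios_memtype() above is a straightforward table walk: the BIT 'M' entry points (at offset 3) to a memory table whose first four bytes are version, header size, record size and entry count; the strap-selected record's low nibble then encodes the RAM type. The offset arithmetic, worked with made-up table values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* made-up values, only to show the arithmetic */
        uint16_t table = 0x5000;        /* read from BIT 'M' offset + 3 */
        uint8_t header = 5, record = 2, ramcfg = 3;
        uint16_t entry = table + header + ramcfg * record;      /* 0x500b */
        uint8_t byte0 = 0x01;           /* low nibble 1 -> DDR3 */
        printf("record at 0x%04x, type nibble %u\n", entry, byte0 & 0x0f);
        return 0;
}
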
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
new file mode 100644
index 000000000000..eb06836b69f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -0,0 +1,130 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/fb.h>
26
27#define NV04_PFB_BOOT_0 0x00100000
28# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003
29# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000
30# define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB 0x00000001
31# define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB 0x00000002
32# define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB 0x00000003
33# define NV04_PFB_BOOT_0_RAM_WIDTH_128 0x00000004
34# define NV04_PFB_BOOT_0_RAM_TYPE 0x00000028
35# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT 0x00000000
36# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT 0x00000008
37# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK 0x00000010
38# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT 0x00000018
39# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT 0x00000020
40# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16 0x00000028
41# define NV04_PFB_BOOT_0_UMA_ENABLE 0x00000100
42# define NV04_PFB_BOOT_0_UMA_SIZE 0x0000f000
43#define NV04_PFB_CFG0 0x00100200
44
45struct nv04_fb_priv {
46 struct nouveau_fb base;
47};
48
49bool
50nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
51{
52 if (!(tile_flags & 0xff00))
53 return true;
54
55 return false;
56}
57
58static int
59nv04_fb_init(struct nouveau_object *object)
60{
61 struct nv04_fb_priv *priv = (void *)object;
62 int ret;
63
64 ret = nouveau_fb_init(&priv->base);
65 if (ret)
66 return ret;
67
68 /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
69 * nvidia reading PFB_CFG_0, then writing back its original value.
70 * (which was 0x701114 in this case)
71 */
72 nv_wr32(priv, NV04_PFB_CFG0, 0x1114);
73 return 0;
74}
75
76static int
77nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
78 struct nouveau_oclass *oclass, void *data, u32 size,
79 struct nouveau_object **pobject)
80{
81 struct nv04_fb_priv *priv;
82 u32 boot0;
83 int ret;
84
85 ret = nouveau_fb_create(parent, engine, oclass, &priv);
86 *pobject = nv_object(priv);
87 if (ret)
88 return ret;
89
90 boot0 = nv_rd32(priv, NV04_PFB_BOOT_0);
91 if (boot0 & 0x00000100) {
92 priv->base.ram.size = ((boot0 >> 12) & 0xf) * 2 + 2;
93 priv->base.ram.size *= 1024 * 1024;
94 } else {
95 switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
96 case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
97 priv->base.ram.size = 32 * 1024 * 1024;
98 break;
99 case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
100 priv->base.ram.size = 16 * 1024 * 1024;
101 break;
102 case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
103 priv->base.ram.size = 8 * 1024 * 1024;
104 break;
105 case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
106 priv->base.ram.size = 4 * 1024 * 1024;
107 break;
108 }
109 }
110
111 if ((boot0 & 0x00000038) <= 0x10)
112 priv->base.ram.type = NV_MEM_TYPE_SGRAM;
113 else
114 priv->base.ram.type = NV_MEM_TYPE_SDRAM;
115
116
117 priv->base.memtype_valid = nv04_fb_memtype_valid;
118 return nouveau_fb_created(&priv->base);
119}
120
121struct nouveau_oclass
122nv04_fb_oclass = {
123 .handle = NV_SUBDEV(FB, 0x04),
124 .ofuncs = &(struct nouveau_ofuncs) {
125 .ctor = nv04_fb_ctor,
126 .dtor = _nouveau_fb_dtor,
127 .init = nv04_fb_init,
128 .fini = _nouveau_fb_fini,
129 },
130};
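
Two sizing paths in the ctor above: with UMA enabled (BOOT_0 bit 8) the size is a 4-bit field scaled as (n * 2 + 2) MiB, otherwise the two RAM_AMOUNT bits select one of four fixed sizes. The UMA arithmetic, worked for one made-up register value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t boot0 = 0x00003100;    /* made-up: UMA enabled, field = 3 */
        if (boot0 & 0x00000100) {
                uint32_t mib = ((boot0 >> 12) & 0xf) * 2 + 2;   /* 3*2+2 = 8 */
                printf("UMA carve-out: %u MiB\n", mib);
        }
        return 0;
}
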
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
new file mode 100644
index 000000000000..f037a422d2f4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -0,0 +1,120 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv10_fb_priv {
30 struct nouveau_fb base;
31};
32
33static void
34nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
35 u32 flags, struct nouveau_fb_tile *tile)
36{
37 tile->addr = 0x80000000 | addr;
38 tile->limit = max(1u, addr + size) - 1;
39 tile->pitch = pitch;
40}
41
42static void
43nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
44{
45 tile->addr = 0;
46 tile->limit = 0;
47 tile->pitch = 0;
48 tile->zcomp = 0;
49}
50
51void
52nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
53{
54 nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
55 nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
56 nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
57}
58
59static int
60nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
61 struct nouveau_oclass *oclass, void *data, u32 size,
62 struct nouveau_object **pobject)
63{
64 struct nouveau_device *device = nv_device(parent);
65 struct nv10_fb_priv *priv;
66 int ret;
67
68 ret = nouveau_fb_create(parent, engine, oclass, &priv);
69 *pobject = nv_object(priv);
70 if (ret)
71 return ret;
72
73 if (device->chipset == 0x1a || device->chipset == 0x1f) {
74 struct pci_dev *bridge;
75 u32 mem, mib;
76
77 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
78 if (!bridge) {
79 nv_fatal(device, "no bridge device\n");
80 return 0;
81 }
82
83 if (device->chipset == 0x1a) {
84 pci_read_config_dword(bridge, 0x7c, &mem);
85 mib = ((mem >> 6) & 31) + 1;
86 } else {
87 pci_read_config_dword(bridge, 0x84, &mem);
88 mib = ((mem >> 4) & 127) + 1;
89 }
90
91 priv->base.ram.type = NV_MEM_TYPE_STOLEN;
92 priv->base.ram.size = mib * 1024 * 1024;
93 } else {
94 u32 cfg0 = nv_rd32(priv, 0x100200);
95 if (cfg0 & 0x00000001)
96 priv->base.ram.type = NV_MEM_TYPE_DDR1;
97 else
98 priv->base.ram.type = NV_MEM_TYPE_SDRAM;
99
100 priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
101 }
102
103 priv->base.memtype_valid = nv04_fb_memtype_valid;
104 priv->base.tile.regions = 8;
105 priv->base.tile.init = nv10_fb_tile_init;
106 priv->base.tile.fini = nv10_fb_tile_fini;
107 priv->base.tile.prog = nv10_fb_tile_prog;
108 return nouveau_fb_created(&priv->base);
109}
110
111struct nouveau_oclass
112nv10_fb_oclass = {
113 .handle = NV_SUBDEV(FB, 0x10),
114 .ofuncs = &(struct nouveau_ofuncs) {
115 .ctor = nv10_fb_ctor,
116 .dtor = _nouveau_fb_dtor,
117 .init = _nouveau_fb_init,
118 .fini = _nouveau_fb_fini,
119 },
120};
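
The nv1a/nv1f (nForce IGP) path above cannot size VRAM from PFB at all: the memory is stolen system RAM, so the size is read from the host bridge's PCI config space, with a different bitfield layout per chipset. The two decodes, worked with made-up config dwords:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t mem1a = 0x000001c0;    /* made-up dword from config 0x7c */
        uint32_t mem1f = 0x000001f0;    /* made-up dword from config 0x84 */
        printf("nv1a: %u MiB\n", ((mem1a >> 6) & 31) + 1);      /* 7+1 = 8 */
        printf("nv1f: %u MiB\n", ((mem1f >> 4) & 127) + 1);     /* 31+1 = 32 */
        return 0;
}
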
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
new file mode 100644
index 000000000000..4b3578fcb7fb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -0,0 +1,136 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv20_fb_priv {
30 struct nouveau_fb base;
31};
32
33static void
34nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
35 u32 flags, struct nouveau_fb_tile *tile)
36{
37 struct nouveau_device *device = nv_device(pfb);
38 int bpp = (flags & 2) ? 32 : 16;
39
40 tile->addr = 0x00000001 | addr;
41 tile->limit = max(1u, addr + size) - 1;
42 tile->pitch = pitch;
43
44 /* Allocate some of the on-die tag memory, used to store Z
45 * compression meta-data (most likely just a bitmap determining
46 * if a given tile is compressed or not).
47 */
48 size /= 256;
49 if (flags & 4) {
50 if (!nouveau_mm_head(&pfb->tags, 1, size, size, 1, &tile->tag)) {
51 /* Enable Z compression */
52 tile->zcomp = tile->tag->offset;
53 if (device->chipset >= 0x25) {
54 if (bpp == 16)
55 tile->zcomp |= 0x00100000;
56 else
57 tile->zcomp |= 0x00200000;
58 } else {
59 tile->zcomp |= 0x80000000;
60 if (bpp != 16)
61 tile->zcomp |= 0x04000000;
62 }
63 }
64
65 tile->addr |= 2;
66 }
67}
68
69static void
70nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
71{
72 tile->addr = 0;
73 tile->limit = 0;
74 tile->pitch = 0;
75 tile->zcomp = 0;
76 nouveau_mm_free(&pfb->tags, &tile->tag);
77}
78
79static void
80nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
81{
82 nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
83 nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
84 nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
85 nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
86}
87
88static int
89nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
90 struct nouveau_oclass *oclass, void *data, u32 size,
91 struct nouveau_object **pobject)
92{
93 struct nouveau_device *device = nv_device(parent);
94 struct nv20_fb_priv *priv;
95 u32 pbus1218;
96 int ret;
97
98 ret = nouveau_fb_create(parent, engine, oclass, &priv);
99 *pobject = nv_object(priv);
100 if (ret)
101 return ret;
102
103 pbus1218 = nv_rd32(priv, 0x001218);
104 switch (pbus1218 & 0x00000300) {
105 case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
106 case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
107 case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
108 case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
109 }
110 priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
111
112 if (device->chipset >= 0x25)
113 ret = nouveau_mm_init(&priv->base.tags, 0, 64 * 1024, 1);
114 else
115 ret = nouveau_mm_init(&priv->base.tags, 0, 32 * 1024, 1);
116 if (ret)
117 return ret;
118
119 priv->base.memtype_valid = nv04_fb_memtype_valid;
120 priv->base.tile.regions = 8;
121 priv->base.tile.init = nv20_fb_tile_init;
122 priv->base.tile.fini = nv20_fb_tile_fini;
123 priv->base.tile.prog = nv20_fb_tile_prog;
124 return nouveau_fb_created(&priv->base);
125}
126
127struct nouveau_oclass
128nv20_fb_oclass = {
129 .handle = NV_SUBDEV(FB, 0x20),
130 .ofuncs = &(struct nouveau_ofuncs) {
131 .ctor = nv20_fb_ctor,
132 .dtor = _nouveau_fb_dtor,
133 .init = _nouveau_fb_init,
134 .fini = _nouveau_fb_fini,
135 },
136};
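
nv20_fb_tile_init() above budgets one unit of on-die tag RAM per 256 bytes of tiled surface; only if that allocation succeeds is Z compression enabled, with format bits that depend on chipset (pre/post nv25) and depth. The sizing, worked for a made-up region (the zero tag offset here is illustrative only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t bytes = 1u << 20;      /* made-up 1 MiB tile region */
        uint32_t tags  = bytes / 256;   /* 4096 tag units requested */
        /* >= nv25, 16bpp: tag offset (0 here) | 0x00100000 */
        uint32_t zcomp = 0x00000000 | 0x00100000;
        printf("%u tags, zcomp = 0x%08x\n", tags, zcomp);
        return 0;
}
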
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
new file mode 100644
index 000000000000..cba67bc91390
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -0,0 +1,148 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv30_fb_priv {
30 struct nouveau_fb base;
31};
32
33void
34nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
35 u32 flags, struct nouveau_fb_tile *tile)
36{
37 tile->addr = addr | 1;
38 tile->limit = max(1u, addr + size) - 1;
39 tile->pitch = pitch;
40}
41
42void
43nv30_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
44{
45 tile->addr = 0;
46 tile->limit = 0;
47 tile->pitch = 0;
48}
49
50static int
51calc_bias(struct nv30_fb_priv *priv, int k, int i, int j)
52{
53 struct nouveau_device *device = nv_device(priv);
54 int b = (device->chipset > 0x30 ?
55 nv_rd32(priv, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
56 0) & 0xf;
57
58 return 2 * (b & 0x8 ? b - 0x10 : b);
59}
60
61static int
62calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
63{
64 int j, x = 0;
65
66 for (j = 0; j < 4; j++) {
67 int m = (l >> (8 * i) & 0xff) + calc_bias(priv, k, i, j);
68
69 x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
70 }
71
72 return x;
73}
74
75static int
76nv30_fb_init(struct nouveau_object *object)
77{
78 struct nouveau_device *device = nv_device(object);
79 struct nv30_fb_priv *priv = (void *)object;
80 int ret, i, j;
81
82 ret = nouveau_fb_init(&priv->base);
83 if (ret)
84 return ret;
85
86 /* Init the memory timing regs at 0x10037c/0x1003ac */
87 if (device->chipset == 0x30 ||
88 device->chipset == 0x31 ||
89 device->chipset == 0x35) {
90 /* Related to ROP count */
91 int n = (device->chipset == 0x31 ? 2 : 4);
92 int l = nv_rd32(priv, 0x1003d0);
93
94 for (i = 0; i < n; i++) {
95 for (j = 0; j < 3; j++)
96 nv_wr32(priv, 0x10037c + 0xc * i + 0x4 * j,
97 calc_ref(priv, l, 0, j));
98
99 for (j = 0; j < 2; j++)
100 nv_wr32(priv, 0x1003ac + 0x8 * i + 0x4 * j,
101 calc_ref(priv, l, 1, j));
102 }
103 }
104
105 return 0;
106}
107
108static int
109nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
110 struct nouveau_oclass *oclass, void *data, u32 size,
111 struct nouveau_object **pobject)
112{
113 struct nv30_fb_priv *priv;
114 u32 pbus1218;
115 int ret;
116
117 ret = nouveau_fb_create(parent, engine, oclass, &priv);
118 *pobject = nv_object(priv);
119 if (ret)
120 return ret;
121
122 pbus1218 = nv_rd32(priv, 0x001218);
123 switch (pbus1218 & 0x00000300) {
124 case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
125 case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
126 case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
127 case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
128 }
129 priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
130
131 priv->base.memtype_valid = nv04_fb_memtype_valid;
132 priv->base.tile.regions = 8;
133 priv->base.tile.init = nv30_fb_tile_init;
134 priv->base.tile.fini = nv30_fb_tile_fini;
135 priv->base.tile.prog = nv10_fb_tile_prog;
136 return nouveau_fb_created(&priv->base);
137}
138
139struct nouveau_oclass
140nv30_fb_oclass = {
141 .handle = NV_SUBDEV(FB, 0x30),
142 .ofuncs = &(struct nouveau_ofuncs) {
143 .ctor = nv30_fb_ctor,
144 .dtor = _nouveau_fb_dtor,
145 .init = nv30_fb_init,
146 .fini = _nouveau_fb_fini,
147 },
148};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
new file mode 100644
index 000000000000..347a496fcad8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -0,0 +1,178 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv40_fb_priv {
30 struct nouveau_fb base;
31};
32
33static inline int
34nv44_graph_class(struct nouveau_device *device)
35{
36 if ((device->chipset & 0xf0) == 0x60)
37 return 1;
38
39 return !(0x0baf & (1 << (device->chipset & 0x0f)));
40}
41
42static void
43nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
44{
45 nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
46 nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
47 nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
48}
49
50static void
51nv40_fb_init_gart(struct nv40_fb_priv *priv)
52{
53 nv_wr32(priv, 0x100800, 0x00000001);
54}
55
56static void
57nv44_fb_init_gart(struct nv40_fb_priv *priv)
58{
59 nv_wr32(priv, 0x100850, 0x80000000);
60 nv_wr32(priv, 0x100800, 0x00000001);
61}
62
63static int
64nv40_fb_init(struct nouveau_object *object)
65{
66 struct nv40_fb_priv *priv = (void *)object;
67 int ret;
68
69 ret = nouveau_fb_init(&priv->base);
70 if (ret)
71 return ret;
72
73 switch (nv_device(priv)->chipset) {
74 case 0x40:
75 case 0x45:
76 nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
77 break;
78 default:
79 if (nv44_graph_class(nv_device(priv)))
80 nv44_fb_init_gart(priv);
81 else
82 nv40_fb_init_gart(priv);
83 break;
84 }
85
86 return 0;
87}
88
89static int
90nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
91 struct nouveau_oclass *oclass, void *data, u32 size,
92 struct nouveau_object **pobject)
93{
94 struct nouveau_device *device = nv_device(parent);
95 struct nv40_fb_priv *priv;
96 int ret;
97
98 ret = nouveau_fb_create(parent, engine, oclass, &priv);
99 *pobject = nv_object(priv);
100 if (ret)
101 return ret;
102
103 /* 0x001218 is actually present on a few other NV4X I looked at,
104 * and even contains sane values matching 0x100474. From looking
105 * at various vbios images however, this isn't the case everywhere.
106 * So, I chose to use the same regs I've seen NVIDIA reading around
107 * the memory detection, hopefully that'll get us the right numbers
108 */
109 if (device->chipset == 0x40) {
110 u32 pbus1218 = nv_rd32(priv, 0x001218);
111 switch (pbus1218 & 0x00000300) {
112 case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
113 case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
114 case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
115 case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
116 }
117 } else
118 if (device->chipset == 0x49 || device->chipset == 0x4b) {
119 u32 pfb914 = nv_rd32(priv, 0x100914);
120 switch (pfb914 & 0x00000003) {
121 case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
122 case 0x00000001: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
123 case 0x00000002: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
124 case 0x00000003: break;
125 }
126 } else
127 if (device->chipset != 0x4e) {
128 u32 pfb474 = nv_rd32(priv, 0x100474);
129 if (pfb474 & 0x00000004)
130 priv->base.ram.type = NV_MEM_TYPE_GDDR3;
131 if (pfb474 & 0x00000002)
132 priv->base.ram.type = NV_MEM_TYPE_DDR2;
133 if (pfb474 & 0x00000001)
134 priv->base.ram.type = NV_MEM_TYPE_DDR1;
135 } else {
136 priv->base.ram.type = NV_MEM_TYPE_STOLEN;
137 }
138
139 priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
140
141 priv->base.memtype_valid = nv04_fb_memtype_valid;
142 switch (device->chipset) {
143 case 0x40:
144 case 0x45:
145 priv->base.tile.regions = 8;
146 break;
147 case 0x46:
148 case 0x47:
149 case 0x49:
150 case 0x4b:
151 case 0x4c:
152 priv->base.tile.regions = 15;
153 break;
154 default:
155 priv->base.tile.regions = 12;
156 break;
157 }
158 priv->base.tile.init = nv30_fb_tile_init;
159 priv->base.tile.fini = nv30_fb_tile_fini;
160 if (device->chipset == 0x40)
161 priv->base.tile.prog = nv10_fb_tile_prog;
162 else
163 priv->base.tile.prog = nv40_fb_tile_prog;
164
165 return nouveau_fb_created(&priv->base);
166}
167
168
169struct nouveau_oclass
170nv40_fb_oclass = {
171 .handle = NV_SUBDEV(FB, 0x40),
172 .ofuncs = &(struct nouveau_ofuncs) {
173 .ctor = nv40_fb_ctor,
174 .dtor = _nouveau_fb_dtor,
175 .init = nv40_fb_init,
176 .fini = _nouveau_fb_fini,
177 },
178};
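
nv44_graph_class() above compresses the nv40-vs-nv44 family split into one constant: for chipsets 0x40-0x4f, bit n of 0x0baf is set when chipset 0x4n has nv40-class graphics, while the 0x6x family always takes the nv44 path. Expanding the mask:

#include <stdio.h>

int main(void)
{
        const unsigned mask = 0x0baf;   /* bits 0,1,2,3,5,7,8,9,11 */
        for (unsigned n = 0; n < 16; n++)
                printf("0x4%x: %s\n", n,
                       (mask & (1u << n)) ? "nv40-class" : "nv44-class");
        return 0;
}
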
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
new file mode 100644
index 000000000000..436e9efe7ef5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -0,0 +1,498 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/enum.h>
27
28#include <subdev/fb.h>
29#include <subdev/bios.h>
30
31struct nv50_fb_priv {
32 struct nouveau_fb base;
33 struct page *r100c08_page;
34 dma_addr_t r100c08;
35};
36
37static int types[0x80] = {
38 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
39 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
40 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
41 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
42 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
43 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
44 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
45 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
46};
47
48static bool
49nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
50{
51 return types[(memtype & 0xff00) >> 8] != 0;
52}
53
54static int
55nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
56 u32 memtype, struct nouveau_mem **pmem)
57{
58 struct nv50_fb_priv *priv = (void *)pfb;
59 struct nouveau_mm *heap = &priv->base.vram;
60 struct nouveau_mm *tags = &priv->base.tags;
61 struct nouveau_mm_node *r;
62 struct nouveau_mem *mem;
63 int comp = (memtype & 0x300) >> 8;
64 int type = (memtype & 0x07f);
65 int back = (memtype & 0x800);
66 int min, max, ret;
67
68 max = (size >> 12);
69 min = ncmin ? (ncmin >> 12) : max;
70 align >>= 12;
71
72 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
73 if (!mem)
74 return -ENOMEM;
75
76 mutex_lock(&pfb->base.mutex);
77 if (comp) {
78 if (align == 16) {
79 int n = (max >> 4) * comp;
80
81 ret = nouveau_mm_head(tags, 1, n, n, 1, &mem->tag);
82 if (ret)
83 mem->tag = NULL;
84 }
85
86 if (unlikely(!mem->tag))
87 comp = 0;
88 }
89
90 INIT_LIST_HEAD(&mem->regions);
91 mem->memtype = (comp << 7) | type;
92 mem->size = max;
93
94 type = types[type];
95 do {
96 if (back)
97 ret = nouveau_mm_tail(heap, type, max, min, align, &r);
98 else
99 ret = nouveau_mm_head(heap, type, max, min, align, &r);
100 if (ret) {
101 mutex_unlock(&pfb->base.mutex);
102 pfb->ram.put(pfb, &mem);
103 return ret;
104 }
105
106 list_add_tail(&r->rl_entry, &mem->regions);
107 max -= r->length;
108 } while (max);
109 mutex_unlock(&pfb->base.mutex);
110
111 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
112 mem->offset = (u64)r->offset << 12;
113 *pmem = mem;
114 return 0;
115}
116
117void
118nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
119{
120 struct nv50_fb_priv *priv = (void *)pfb;
121 struct nouveau_mm_node *this;
122 struct nouveau_mem *mem;
123
124 mem = *pmem;
125 *pmem = NULL;
126 if (unlikely(mem == NULL))
127 return;
128
129 mutex_lock(&pfb->base.mutex);
130 while (!list_empty(&mem->regions)) {
131 this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
132
133 list_del(&this->rl_entry);
134 nouveau_mm_free(&priv->base.vram, &this);
135 }
136
137 nouveau_mm_free(&priv->base.tags, &mem->tag);
138 mutex_unlock(&pfb->base.mutex);
139
140 kfree(mem);
141}
142
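/* Work out the VRAM reordering block size from the memory controller
 * config: partitions * banks * column size, multiplied by 3 when
 * 0x100250 bit 0 is set (mixed-density configs, presumably). The
 * result becomes the allocation granularity for the VRAM mm.
 */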
143static u32
144nv50_vram_rblock(struct nv50_fb_priv *priv)
145{
146 int i, parts, colbits, rowbitsa, rowbitsb, banks;
147 u64 rowsize, predicted;
148 u32 r0, r4, rt, ru, rblock_size;
149
150 r0 = nv_rd32(priv, 0x100200);
151 r4 = nv_rd32(priv, 0x100204);
152 rt = nv_rd32(priv, 0x100250);
153 ru = nv_rd32(priv, 0x001540);
154 nv_debug(priv, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
155
156 for (i = 0, parts = 0; i < 8; i++) {
157 if (ru & (0x00010000 << i))
158 parts++;
159 }
160
161 colbits = (r4 & 0x0000f000) >> 12;
162 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
163 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
164 banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
165
166 rowsize = parts * banks * (1 << colbits) * 8;
167 predicted = rowsize << rowbitsa;
168 if (r0 & 0x00000004)
169 predicted += rowsize << rowbitsb;
170
171	if (predicted != priv->base.ram.size) {
172		nv_warn(priv, "detected %d MiB VRAM, but geometry predicts %d MiB\n",
173			(u32)(priv->base.ram.size >> 20), (u32)(predicted >> 20));
174	}
175
176 rblock_size = rowsize;
177 if (rt & 1)
178 rblock_size *= 3;
179
180 nv_debug(priv, "rblock %d bytes\n", rblock_size);
181 return rblock_size;
182}
183
184static int
185nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
186 struct nouveau_oclass *oclass, void *data, u32 size,
187 struct nouveau_object **pobject)
188{
189 struct nouveau_device *device = nv_device(parent);
190 struct nouveau_bios *bios = nouveau_bios(device);
191 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
192 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
193 struct nv50_fb_priv *priv;
194 u32 tags;
195 int ret;
196
197 ret = nouveau_fb_create(parent, engine, oclass, &priv);
198 *pobject = nv_object(priv);
199 if (ret)
200 return ret;
201
202 switch (nv_rd32(priv, 0x100714) & 0x00000007) {
203 case 0: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
204 case 1:
205 if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
206 priv->base.ram.type = NV_MEM_TYPE_DDR3;
207 else
208 priv->base.ram.type = NV_MEM_TYPE_DDR2;
209 break;
210 case 2: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
211 case 3: priv->base.ram.type = NV_MEM_TYPE_GDDR4; break;
212 case 4: priv->base.ram.type = NV_MEM_TYPE_GDDR5; break;
213 default:
214 break;
215 }
216
217 priv->base.ram.size = nv_rd32(priv, 0x10020c);
218 priv->base.ram.size = (priv->base.ram.size & 0xffffff00) |
219 ((priv->base.ram.size & 0x000000ff) << 32);
220
221 tags = nv_rd32(priv, 0x100320);
222 if (tags) {
223 ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
224 if (ret)
225 return ret;
226
227 nv_debug(priv, "%d compression tags\n", tags);
228 }
229
230 size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
231 switch (device->chipset) {
232 case 0xaa:
233 case 0xac:
234 case 0xaf: /* IGPs, no reordering, no real VRAM */
235 ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 1);
236 if (ret)
237 return ret;
238
239 priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
240 break;
241 default:
242 ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
243 nv50_vram_rblock(priv) >> 12);
244 if (ret)
245 return ret;
246
247 priv->base.ram.ranks = (nv_rd32(priv, 0x100200) & 0x4) ? 2 : 1;
248 break;
249 }
250
251 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
252 if (priv->r100c08_page) {
253 priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
254 0, PAGE_SIZE,
255 PCI_DMA_BIDIRECTIONAL);
256 if (pci_dma_mapping_error(device->pdev, priv->r100c08))
257 nv_warn(priv, "failed 0x100c08 page map\n");
258 } else {
259 nv_warn(priv, "failed 0x100c08 page alloc\n");
260 }
261
262 priv->base.memtype_valid = nv50_fb_memtype_valid;
263 priv->base.ram.get = nv50_fb_vram_new;
264 priv->base.ram.put = nv50_fb_vram_del;
265 return nouveau_fb_created(&priv->base);
266}
267
268static void
269nv50_fb_dtor(struct nouveau_object *object)
270{
271 struct nouveau_device *device = nv_device(object);
272 struct nv50_fb_priv *priv = (void *)object;
273
274 if (priv->r100c08_page) {
275 pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
276 PCI_DMA_BIDIRECTIONAL);
277 __free_page(priv->r100c08_page);
278 }
279
280 nouveau_mm_fini(&priv->base.vram);
281 nouveau_fb_destroy(&priv->base);
282}
283
284static int
285nv50_fb_init(struct nouveau_object *object)
286{
287 struct nouveau_device *device = nv_device(object);
288 struct nv50_fb_priv *priv = (void *)object;
289 int ret;
290
291 ret = nouveau_fb_init(&priv->base);
292 if (ret)
293 return ret;
294
295 /* Not a clue what this is exactly. Without pointing it at a
296 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
297 * cause IOMMU "read from address 0" errors (rh#561267)
298 */
299 nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
300
301 /* This is needed to get meaningful information from 100c90
302 * on traps. No idea what these values mean exactly. */
303 switch (device->chipset) {
304 case 0x50:
305 nv_wr32(priv, 0x100c90, 0x000707ff);
306 break;
307 case 0xa3:
308 case 0xa5:
309 case 0xa8:
310 nv_wr32(priv, 0x100c90, 0x000d0fff);
311 break;
312 case 0xaf:
313 nv_wr32(priv, 0x100c90, 0x089d1fff);
314 break;
315 default:
316 nv_wr32(priv, 0x100c90, 0x001d07ff);
317 break;
318 }
319
320 return 0;
321}
322
323struct nouveau_oclass
324nv50_fb_oclass = {
325 .handle = NV_SUBDEV(FB, 0x50),
326 .ofuncs = &(struct nouveau_ofuncs) {
327 .ctor = nv50_fb_ctor,
328 .dtor = nv50_fb_dtor,
329 .init = nv50_fb_init,
330 .fini = _nouveau_fb_fini,
331 },
332};
333
334static const struct nouveau_enum vm_dispatch_subclients[] = {
335 { 0x00000000, "GRCTX", NULL },
336 { 0x00000001, "NOTIFY", NULL },
337 { 0x00000002, "QUERY", NULL },
338 { 0x00000003, "COND", NULL },
339 { 0x00000004, "M2M_IN", NULL },
340 { 0x00000005, "M2M_OUT", NULL },
341 { 0x00000006, "M2M_NOTIFY", NULL },
342 {}
343};
344
345static const struct nouveau_enum vm_ccache_subclients[] = {
346 { 0x00000000, "CB", NULL },
347 { 0x00000001, "TIC", NULL },
348 { 0x00000002, "TSC", NULL },
349 {}
350};
351
352static const struct nouveau_enum vm_prop_subclients[] = {
353 { 0x00000000, "RT0", NULL },
354 { 0x00000001, "RT1", NULL },
355 { 0x00000002, "RT2", NULL },
356 { 0x00000003, "RT3", NULL },
357 { 0x00000004, "RT4", NULL },
358 { 0x00000005, "RT5", NULL },
359 { 0x00000006, "RT6", NULL },
360 { 0x00000007, "RT7", NULL },
361 { 0x00000008, "ZETA", NULL },
362 { 0x00000009, "LOCAL", NULL },
363 { 0x0000000a, "GLOBAL", NULL },
364 { 0x0000000b, "STACK", NULL },
365 { 0x0000000c, "DST2D", NULL },
366 {}
367};
368
369static const struct nouveau_enum vm_pfifo_subclients[] = {
370 { 0x00000000, "PUSHBUF", NULL },
371 { 0x00000001, "SEMAPHORE", NULL },
372 {}
373};
374
375static const struct nouveau_enum vm_bar_subclients[] = {
376 { 0x00000000, "FB", NULL },
377 { 0x00000001, "IN", NULL },
378 {}
379};
380
381static const struct nouveau_enum vm_client[] = {
382 { 0x00000000, "STRMOUT", NULL },
383 { 0x00000003, "DISPATCH", vm_dispatch_subclients },
384 { 0x00000004, "PFIFO_WRITE", NULL },
385 { 0x00000005, "CCACHE", vm_ccache_subclients },
386 { 0x00000006, "PPPP", NULL },
387 { 0x00000007, "CLIPID", NULL },
388 { 0x00000008, "PFIFO_READ", NULL },
389 { 0x00000009, "VFETCH", NULL },
390 { 0x0000000a, "TEXTURE", NULL },
391 { 0x0000000b, "PROP", vm_prop_subclients },
392 { 0x0000000c, "PVP", NULL },
393 { 0x0000000d, "PBSP", NULL },
394 { 0x0000000e, "PCRYPT", NULL },
395 { 0x0000000f, "PCOUNTER", NULL },
396 { 0x00000011, "PDAEMON", NULL },
397 {}
398};
399
400static const struct nouveau_enum vm_engine[] = {
401 { 0x00000000, "PGRAPH", NULL },
402 { 0x00000001, "PVP", NULL },
403 { 0x00000004, "PEEPHOLE", NULL },
404 { 0x00000005, "PFIFO", vm_pfifo_subclients },
405 { 0x00000006, "BAR", vm_bar_subclients },
406 { 0x00000008, "PPPP", NULL },
407 { 0x00000009, "PBSP", NULL },
408 { 0x0000000a, "PCRYPT", NULL },
409 { 0x0000000b, "PCOUNTER", NULL },
410 { 0x0000000c, "SEMAPHORE_BG", NULL },
411 { 0x0000000d, "PCOPY", NULL },
412 { 0x0000000e, "PDAEMON", NULL },
413 {}
414};
415
416static const struct nouveau_enum vm_fault[] = {
417 { 0x00000000, "PT_NOT_PRESENT", NULL },
418 { 0x00000001, "PT_TOO_SHORT", NULL },
419 { 0x00000002, "PAGE_NOT_PRESENT", NULL },
420 { 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
421 { 0x00000004, "PAGE_READ_ONLY", NULL },
422 { 0x00000006, "NULL_DMAOBJ", NULL },
423 { 0x00000007, "WRONG_MEMTYPE", NULL },
424 { 0x0000000b, "VRAM_LIMIT", NULL },
425 { 0x0000000f, "DMAOBJ_LIMIT", NULL },
426 {}
427};
428
429void
430nv50_fb_trap(struct nouveau_fb *pfb, int display)
431{
432 struct nouveau_device *device = nv_device(pfb);
433 struct nv50_fb_priv *priv = (void *)pfb;
434 const struct nouveau_enum *en, *cl;
435 u32 trap[6], idx, chan;
436 u8 st0, st1, st2, st3;
437 int i;
438
439 idx = nv_rd32(priv, 0x100c90);
440 if (!(idx & 0x80000000))
441 return;
442 idx &= 0x00ffffff;
443
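	/* 0x100c90 is an indexed window: select each of the six words of
	 * the trap record, read it back via 0x100c94, then write bit 31
	 * to re-arm trap capture.
	 */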
444 for (i = 0; i < 6; i++) {
445 nv_wr32(priv, 0x100c90, idx | i << 24);
446 trap[i] = nv_rd32(priv, 0x100c94);
447 }
448 nv_wr32(priv, 0x100c90, idx | 0x80000000);
449
450 if (!display)
451 return;
452
453 /* decode status bits into something more useful */
454 if (device->chipset < 0xa3 ||
455 device->chipset == 0xaa || device->chipset == 0xac) {
456 st0 = (trap[0] & 0x0000000f) >> 0;
457 st1 = (trap[0] & 0x000000f0) >> 4;
458 st2 = (trap[0] & 0x00000f00) >> 8;
459 st3 = (trap[0] & 0x0000f000) >> 12;
460 } else {
461 st0 = (trap[0] & 0x000000ff) >> 0;
462 st1 = (trap[0] & 0x0000ff00) >> 8;
463 st2 = (trap[0] & 0x00ff0000) >> 16;
464 st3 = (trap[0] & 0xff000000) >> 24;
465 }
466 chan = (trap[2] << 16) | trap[1];
467
468 nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x ",
469 (trap[5] & 0x00000100) ? "read" : "write",
470 trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan);
471
472 en = nouveau_enum_find(vm_engine, st0);
473 if (en)
474 printk("%s/", en->name);
475 else
476 printk("%02x/", st0);
477
478 cl = nouveau_enum_find(vm_client, st2);
479 if (cl)
480 printk("%s/", cl->name);
481 else
482 printk("%02x/", st2);
483
484 if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
485 else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
486 else cl = NULL;
487 if (cl)
488 printk("%s", cl->name);
489 else
490 printk("%02x", st3);
491
492 printk(" reason: ");
493 en = nouveau_enum_find(vm_fault, st1);
494 if (en)
495 printk("%s\n", en->name);
496 else
497 printk("0x%08x\n", st1);
498}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
new file mode 100644
index 000000000000..9f59f2bf0079
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -0,0 +1,245 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/fb.h>
26#include <subdev/bios.h>
27
28struct nvc0_fb_priv {
29 struct nouveau_fb base;
30 struct page *r100c10_page;
31 dma_addr_t r100c10;
32};
33
34/* 0 = unsupported
35 * 1 = non-compressed
36 * 3 = compressed
37 */
38static const u8 types[256] = {
39 1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
40 0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
41 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
42 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
43 3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
44 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
45 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
46 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
47 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
48 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
49 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
50 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
51 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
52 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
53 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
54 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
55};
56
57static bool
58nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
59{
60 u8 memtype = (tile_flags & 0x0000ff00) >> 8;
61 return likely((types[memtype] == 1));
62}
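/* Note that only type 1 (non-compressed) passes here; compressed kinds
 * from the table above are rejected for buffer objects, presumably
 * until compression is wired up for this generation.
 */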
63
64static int
65nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
66 u32 memtype, struct nouveau_mem **pmem)
67{
68 struct nouveau_mm *mm = &pfb->vram;
69 struct nouveau_mm_node *r;
70 struct nouveau_mem *mem;
71 int type = (memtype & 0x0ff);
72 int back = (memtype & 0x800);
73 int ret;
74
75 size >>= 12;
76 align >>= 12;
77 ncmin >>= 12;
78 if (!ncmin)
79 ncmin = size;
80
81 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
82 if (!mem)
83 return -ENOMEM;
84
85 INIT_LIST_HEAD(&mem->regions);
86 mem->memtype = type;
87 mem->size = size;
88
89 mutex_lock(&mm->mutex);
90 do {
91 if (back)
92 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
93 else
94 ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
95 if (ret) {
96 mutex_unlock(&mm->mutex);
97 pfb->ram.put(pfb, &mem);
98 return ret;
99 }
100
101 list_add_tail(&r->rl_entry, &mem->regions);
102 size -= r->length;
103 } while (size);
104 mutex_unlock(&mm->mutex);
105
106 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
107 mem->offset = (u64)r->offset << 12;
108 *pmem = mem;
109 return 0;
110}
111
112static int
113nvc0_fb_init(struct nouveau_object *object)
114{
115 struct nvc0_fb_priv *priv = (void *)object;
116 int ret;
117
118 ret = nouveau_fb_init(&priv->base);
119 if (ret)
120 return ret;
121
122 nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
123 return 0;
124}
125
126static void
127nvc0_fb_dtor(struct nouveau_object *object)
128{
129 struct nouveau_device *device = nv_device(object);
130 struct nvc0_fb_priv *priv = (void *)object;
131
132 if (priv->r100c10_page) {
133 pci_unmap_page(device->pdev, priv->r100c10, PAGE_SIZE,
134 PCI_DMA_BIDIRECTIONAL);
135 __free_page(priv->r100c10_page);
136 }
137
138 nouveau_fb_destroy(&priv->base);
139}
140
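/* Fermi boards can have a different amount of VRAM behind each memory
 * partition. When the amounts differ, only the lowest common size is
 * contiguous from address 0; the remainder is exposed again from an
 * 8GiB offset, hence the second mm region below.
 */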
141static int
142nvc0_vram_detect(struct nvc0_fb_priv *priv)
143{
144 struct nouveau_bios *bios = nouveau_bios(priv);
145 struct nouveau_fb *pfb = &priv->base;
146 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
147 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
148 u32 parts = nv_rd32(priv, 0x022438);
149 u32 pmask = nv_rd32(priv, 0x022554);
150 u32 bsize = nv_rd32(priv, 0x10f20c);
151 u32 offset, length;
152 bool uniform = true;
153 int ret, part;
154
155 nv_debug(priv, "0x100800: 0x%08x\n", nv_rd32(priv, 0x100800));
156 nv_debug(priv, "parts 0x%08x mask 0x%08x\n", parts, pmask);
157
158 priv->base.ram.type = nouveau_fb_bios_memtype(bios);
159 priv->base.ram.ranks = (nv_rd32(priv, 0x10f200) & 0x00000004) ? 2 : 1;
160
161 /* read amount of vram attached to each memory controller */
162 for (part = 0; part < parts; part++) {
163 if (!(pmask & (1 << part))) {
164 u32 psize = nv_rd32(priv, 0x11020c + (part * 0x1000));
165 if (psize != bsize) {
166 if (psize < bsize)
167 bsize = psize;
168 uniform = false;
169 }
170
171 nv_debug(priv, "%d: mem_amount 0x%08x\n", part, psize);
172 priv->base.ram.size += (u64)psize << 20;
173 }
174 }
175
176	/* if all controllers have the same amount attached, there are no holes */
177 if (uniform) {
178 offset = rsvd_head;
179 length = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
180 return nouveau_mm_init(&pfb->vram, offset, length, 1);
181 }
182
183 /* otherwise, address lowest common amount from 0GiB */
184 ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
185 if (ret)
186 return ret;
187
188 /* and the rest starting from (8GiB + common_size) */
189 offset = (0x0200000000ULL >> 12) + (bsize << 8);
190 length = (priv->base.ram.size >> 12) - (bsize << 8) - rsvd_tail;
191
192 ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
193 if (ret) {
194 nouveau_mm_fini(&pfb->vram);
195 return ret;
196 }
197
198 return 0;
199}
200
201static int
202nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
203 struct nouveau_oclass *oclass, void *data, u32 size,
204 struct nouveau_object **pobject)
205{
206 struct nouveau_device *device = nv_device(parent);
207 struct nvc0_fb_priv *priv;
208 int ret;
209
210 ret = nouveau_fb_create(parent, engine, oclass, &priv);
211 *pobject = nv_object(priv);
212 if (ret)
213 return ret;
214
215 priv->base.memtype_valid = nvc0_fb_memtype_valid;
216 priv->base.ram.get = nvc0_fb_vram_new;
217 priv->base.ram.put = nv50_fb_vram_del;
218
219 ret = nvc0_vram_detect(priv);
220 if (ret)
221 return ret;
222
223 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
224 if (!priv->r100c10_page)
225 return -ENOMEM;
226
227 priv->r100c10 = pci_map_page(device->pdev, priv->r100c10_page, 0,
228 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
229 if (pci_dma_mapping_error(device->pdev, priv->r100c10))
230 return -EFAULT;
231
232 return nouveau_fb_created(&priv->base);
233}
234
235
236struct nouveau_oclass
237nvc0_fb_oclass = {
238 .handle = NV_SUBDEV(FB, 0xc0),
239 .ofuncs = &(struct nouveau_ofuncs) {
240 .ctor = nvc0_fb_ctor,
241 .dtor = nvc0_fb_dtor,
242 .init = nvc0_fb_init,
243 .fini = _nouveau_fb_fini,
244 },
245};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
new file mode 100644
index 000000000000..acf818c58bf0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -0,0 +1,271 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/gpio.h>
26#include <subdev/bios.h>
27#include <subdev/bios/gpio.h>
28
29static int
30nouveau_gpio_drive(struct nouveau_gpio *gpio,
31 int idx, int line, int dir, int out)
32{
33 return gpio->drive ? gpio->drive(gpio, line, dir, out) : -ENODEV;
34}
35
36static int
37nouveau_gpio_sense(struct nouveau_gpio *gpio, int idx, int line)
38{
39 return gpio->sense ? gpio->sense(gpio, line) : -ENODEV;
40}
41
42static int
43nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
44 struct dcb_gpio_func *func)
45{
46 if (line == 0xff && tag == 0xff)
47 return -EINVAL;
48
49 if (!dcb_gpio_parse(nouveau_bios(gpio), idx, tag, line, func))
50 return 0;
51
52 /* Apple iMac G4 NV18 */
53 if (nv_device_match(nv_object(gpio), 0x0189, 0x10de, 0x0010)) {
54 if (tag == DCB_GPIO_TVDAC0) {
55 *func = (struct dcb_gpio_func) {
56 .func = DCB_GPIO_TVDAC0,
57 .line = 4,
58 .log[0] = 0,
59 .log[1] = 1,
60 };
61 return 0;
62 }
63 }
64
65 return -EINVAL;
66}
67
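/* log[] from the DCB GPIO entry describes how to drive the line for
 * each logical state: bit 1 = output direction, bit 0 = output level.
 */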
68static int
69nouveau_gpio_set(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, int state)
70{
71 struct dcb_gpio_func func;
72 int ret;
73
74 ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
75 if (ret == 0) {
76 int dir = !!(func.log[state] & 0x02);
77 int out = !!(func.log[state] & 0x01);
78 ret = nouveau_gpio_drive(gpio, idx, func.line, dir, out);
79 }
80
81 return ret;
82}
83
84static int
85nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
86{
87 struct dcb_gpio_func func;
88 int ret;
89
90 ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
91 if (ret == 0) {
92 ret = nouveau_gpio_sense(gpio, idx, func.line);
93 if (ret >= 0)
94 ret = (ret == (func.log[1] & 1));
95 }
96
97 return ret;
98}
99
100static int
101nouveau_gpio_irq(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, bool on)
102{
103 struct dcb_gpio_func func;
104 int ret;
105
106 ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
107 if (ret == 0) {
108 if (idx == 0 && gpio->irq_enable)
109 gpio->irq_enable(gpio, func.line, on);
110 else
111 ret = -ENODEV;
112 }
113
114 return ret;
115}
116
117struct gpio_isr {
118 struct nouveau_gpio *gpio;
119 struct list_head head;
120 struct work_struct work;
121 int idx;
122 struct dcb_gpio_func func;
123 void (*handler)(void *, int);
124 void *data;
125 bool inhibit;
126};
127
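/* inhibit stops a line's work item from being scheduled again while a
 * previous run is still pending; the bottom half clears it when done.
 */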
128static void
129nouveau_gpio_isr_bh(struct work_struct *work)
130{
131 struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
132 struct nouveau_gpio *gpio = isr->gpio;
133 unsigned long flags;
134 int state;
135
136 state = nouveau_gpio_get(gpio, isr->idx, isr->func.func,
137 isr->func.line);
138 if (state >= 0)
139 isr->handler(isr->data, state);
140
141 spin_lock_irqsave(&gpio->lock, flags);
142 isr->inhibit = false;
143 spin_unlock_irqrestore(&gpio->lock, flags);
144}
145
146static void
147nouveau_gpio_isr_run(struct nouveau_gpio *gpio, int idx, u32 line_mask)
148{
149 struct gpio_isr *isr;
150
151 if (idx != 0)
152 return;
153
154 spin_lock(&gpio->lock);
155 list_for_each_entry(isr, &gpio->isr, head) {
156 if (line_mask & (1 << isr->func.line)) {
157 if (isr->inhibit)
158 continue;
159 isr->inhibit = true;
160 schedule_work(&isr->work);
161 }
162 }
163 spin_unlock(&gpio->lock);
164}
165
166static int
167nouveau_gpio_isr_add(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
168 void (*handler)(void *, int), void *data)
169{
170 struct gpio_isr *isr;
171 unsigned long flags;
172 int ret;
173
174 isr = kzalloc(sizeof(*isr), GFP_KERNEL);
175 if (!isr)
176 return -ENOMEM;
177
178 ret = nouveau_gpio_find(gpio, idx, tag, line, &isr->func);
179 if (ret) {
180 kfree(isr);
181 return ret;
182 }
183
184 INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
185 isr->gpio = gpio;
186 isr->handler = handler;
187 isr->data = data;
188 isr->idx = idx;
189
190 spin_lock_irqsave(&gpio->lock, flags);
191 list_add(&isr->head, &gpio->isr);
192 spin_unlock_irqrestore(&gpio->lock, flags);
193 return 0;
194}
195
196static void
197nouveau_gpio_isr_del(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
198 void (*handler)(void *, int), void *data)
199{
200 struct gpio_isr *isr, *tmp;
201 struct dcb_gpio_func func;
202 unsigned long flags;
203 LIST_HEAD(tofree);
204 int ret;
205
206 ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
207 if (ret == 0) {
208 spin_lock_irqsave(&gpio->lock, flags);
209 list_for_each_entry_safe(isr, tmp, &gpio->isr, head) {
210 if (memcmp(&isr->func, &func, sizeof(func)) ||
211 isr->idx != idx ||
212 isr->handler != handler || isr->data != data)
213 continue;
214 list_move_tail(&isr->head, &tofree);
215 }
216 spin_unlock_irqrestore(&gpio->lock, flags);
217
218 list_for_each_entry_safe(isr, tmp, &tofree, head) {
219 flush_work(&isr->work);
220 kfree(isr);
221 }
222 }
223}
224
225int
226nouveau_gpio_create_(struct nouveau_object *parent,
227 struct nouveau_object *engine,
228 struct nouveau_oclass *oclass, int length, void **pobject)
229{
230 struct nouveau_gpio *gpio;
231 int ret;
232
233 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "GPIO", "gpio",
234 length, pobject);
235 gpio = *pobject;
236 if (ret)
237 return ret;
238
239 gpio->find = nouveau_gpio_find;
240 gpio->set = nouveau_gpio_set;
241 gpio->get = nouveau_gpio_get;
242 gpio->irq = nouveau_gpio_irq;
243 gpio->isr_run = nouveau_gpio_isr_run;
244 gpio->isr_add = nouveau_gpio_isr_add;
245 gpio->isr_del = nouveau_gpio_isr_del;
246 INIT_LIST_HEAD(&gpio->isr);
247 spin_lock_init(&gpio->lock);
248 return 0;
249}
250
251static struct dmi_system_id gpio_reset_ids[] = {
252 {
253		.ident = "Apple MacBook Pro 10,1",
254 .matches = {
255 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
256 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
257 }
258 },
259 { }
260};
261
262int
263nouveau_gpio_init(struct nouveau_gpio *gpio)
264{
265 int ret = nouveau_subdev_init(&gpio->base);
266 if (ret == 0 && gpio->reset) {
267 if (dmi_check_system(gpio_reset_ids))
268 gpio->reset(gpio);
269 }
270 return ret;
271}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
new file mode 100644
index 000000000000..168d16a9a8e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
@@ -0,0 +1,169 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/gpio.h>
28
29struct nv10_gpio_priv {
30 struct nouveau_gpio base;
31};
32
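/* NV10 GPIO lines are spread across three PCRTC registers: lines 0-1
 * in 0x600818 (16 bits apart), lines 2-9 in 0x60081c and lines 10-13
 * in 0x600850 (4 bits apart).
 */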
33static int
34nv10_gpio_sense(struct nouveau_gpio *gpio, int line)
35{
36 if (line < 2) {
37 line = line * 16;
38 line = nv_rd32(gpio, 0x600818) >> line;
39 return !!(line & 0x0100);
40 } else
41 if (line < 10) {
42 line = (line - 2) * 4;
43 line = nv_rd32(gpio, 0x60081c) >> line;
44 return !!(line & 0x04);
45 } else
46 if (line < 14) {
47 line = (line - 10) * 4;
48 line = nv_rd32(gpio, 0x600850) >> line;
49 return !!(line & 0x04);
50 }
51
52 return -EINVAL;
53}
54
55static int
56nv10_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
57{
58 u32 reg, mask, data;
59
60 if (line < 2) {
61 line = line * 16;
62 reg = 0x600818;
63 mask = 0x00000011;
64 data = (dir << 4) | out;
65 } else
66 if (line < 10) {
67 line = (line - 2) * 4;
68 reg = 0x60081c;
69 mask = 0x00000003;
70 data = (dir << 1) | out;
71 } else
72 if (line < 14) {
73 line = (line - 10) * 4;
74 reg = 0x600850;
75 mask = 0x00000003;
76 data = (dir << 1) | out;
77 } else {
78 return -EINVAL;
79 }
80
81 nv_mask(gpio, reg, mask << line, data << line);
82 return 0;
83}
84
85static void
86nv10_gpio_irq_enable(struct nouveau_gpio *gpio, int line, bool on)
87{
88 u32 mask = 0x00010001 << line;
89
90 nv_wr32(gpio, 0x001104, mask);
91 nv_mask(gpio, 0x001144, mask, on ? mask : 0);
92}
93
94static void
95nv10_gpio_intr(struct nouveau_subdev *subdev)
96{
97 struct nv10_gpio_priv *priv = (void *)subdev;
98 u32 intr = nv_rd32(priv, 0x001104);
99 u32 hi = (intr & 0x0000ffff) >> 0;
100 u32 lo = (intr & 0xffff0000) >> 16;
101
102 priv->base.isr_run(&priv->base, 0, hi | lo);
103
104 nv_wr32(priv, 0x001104, intr);
105}
106
107static int
108nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
109 struct nouveau_oclass *oclass, void *data, u32 size,
110 struct nouveau_object **pobject)
111{
112 struct nv10_gpio_priv *priv;
113 int ret;
114
115 ret = nouveau_gpio_create(parent, engine, oclass, &priv);
116 *pobject = nv_object(priv);
117 if (ret)
118 return ret;
119
120 priv->base.drive = nv10_gpio_drive;
121 priv->base.sense = nv10_gpio_sense;
122 priv->base.irq_enable = nv10_gpio_irq_enable;
123 nv_subdev(priv)->intr = nv10_gpio_intr;
124 return 0;
125}
126
127static void
128nv10_gpio_dtor(struct nouveau_object *object)
129{
130 struct nv10_gpio_priv *priv = (void *)object;
131 nouveau_gpio_destroy(&priv->base);
132}
133
134static int
135nv10_gpio_init(struct nouveau_object *object)
136{
137 struct nv10_gpio_priv *priv = (void *)object;
138 int ret;
139
140 ret = nouveau_gpio_init(&priv->base);
141 if (ret)
142 return ret;
143
144 nv_wr32(priv, 0x001140, 0x00000000);
145 nv_wr32(priv, 0x001100, 0xffffffff);
146 nv_wr32(priv, 0x001144, 0x00000000);
147 nv_wr32(priv, 0x001104, 0xffffffff);
148 return 0;
149}
150
151static int
152nv10_gpio_fini(struct nouveau_object *object, bool suspend)
153{
154 struct nv10_gpio_priv *priv = (void *)object;
155 nv_wr32(priv, 0x001140, 0x00000000);
156 nv_wr32(priv, 0x001144, 0x00000000);
157 return nouveau_gpio_fini(&priv->base, suspend);
158}
159
160struct nouveau_oclass
161nv10_gpio_oclass = {
162 .handle = NV_SUBDEV(GPIO, 0x10),
163 .ofuncs = &(struct nouveau_ofuncs) {
164 .ctor = nv10_gpio_ctor,
165 .dtor = nv10_gpio_dtor,
166 .init = nv10_gpio_init,
167 .fini = nv10_gpio_fini,
168 },
169};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
new file mode 100644
index 000000000000..f3502c961cd9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -0,0 +1,194 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/gpio.h>
26
27struct nv50_gpio_priv {
28 struct nouveau_gpio base;
29};
30
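/* Program each DCB GPIO entry's default output state at init, plus two
 * per-line bits in 0xe100/0xe28c whose meaning is unknown (unk0/unk1
 * below, possibly input event configuration).
 */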
31static void
32nv50_gpio_reset(struct nouveau_gpio *gpio)
33{
34 struct nouveau_bios *bios = nouveau_bios(gpio);
35 struct nv50_gpio_priv *priv = (void *)gpio;
36 u16 entry;
37 u8 ver;
38 int ent = -1;
39
40 while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
41 static const u32 regs[] = { 0xe100, 0xe28c };
42 u32 data = nv_ro32(bios, entry);
43 u8 line = (data & 0x0000001f);
44 u8 func = (data & 0x0000ff00) >> 8;
45 u8 defs = !!(data & 0x01000000);
46 u8 unk0 = !!(data & 0x02000000);
47 u8 unk1 = !!(data & 0x04000000);
48 u32 val = (unk1 << 16) | unk0;
49 u32 reg = regs[line >> 4]; line &= 0x0f;
50
51 if (func == 0xff)
52 continue;
53
54 gpio->set(gpio, 0, func, line, defs);
55
56 nv_mask(priv, reg, 0x00010001 << line, val << line);
57 }
58}
59
60static int
61nv50_gpio_location(int line, u32 *reg, u32 *shift)
62{
63 const u32 nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
64
65 if (line >= 32)
66 return -EINVAL;
67
68 *reg = nv50_gpio_reg[line >> 3];
69 *shift = (line & 7) << 2;
70 return 0;
71}
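/* Each of the 32 lines gets a 4-bit field, eight lines per register:
 * e.g. line 19 -> 0xe280 bits 15:12.
 */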
72
73static int
74nv50_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
75{
76 u32 reg, shift;
77
78 if (nv50_gpio_location(line, &reg, &shift))
79 return -EINVAL;
80
81 nv_mask(gpio, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
82 return 0;
83}
84
85static int
86nv50_gpio_sense(struct nouveau_gpio *gpio, int line)
87{
88 u32 reg, shift;
89
90 if (nv50_gpio_location(line, &reg, &shift))
91 return -EINVAL;
92
93 return !!(nv_rd32(gpio, reg) & (4 << shift));
94}
95
96void
97nv50_gpio_irq_enable(struct nouveau_gpio *gpio, int line, bool on)
98{
99 u32 reg = line < 16 ? 0xe050 : 0xe070;
100 u32 mask = 0x00010001 << (line & 0xf);
101
102 nv_wr32(gpio, reg + 4, mask);
103 nv_mask(gpio, reg + 0, mask, on ? mask : 0);
104}
105
106void
107nv50_gpio_intr(struct nouveau_subdev *subdev)
108{
109 struct nv50_gpio_priv *priv = (void *)subdev;
110 u32 intr0, intr1 = 0;
111 u32 hi, lo;
112
113 intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
114 if (nv_device(priv)->chipset >= 0x90)
115 intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070);
116
117 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
118 lo = (intr0 >> 16) | (intr1 & 0xffff0000);
119 priv->base.isr_run(&priv->base, 0, hi | lo);
120
121 nv_wr32(priv, 0xe054, intr0);
122 if (nv_device(priv)->chipset >= 0x90)
123 nv_wr32(priv, 0xe074, intr1);
124}
125
126static int
127nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
128 struct nouveau_oclass *oclass, void *data, u32 size,
129 struct nouveau_object **pobject)
130{
131 struct nv50_gpio_priv *priv;
132 int ret;
133
134 ret = nouveau_gpio_create(parent, engine, oclass, &priv);
135 *pobject = nv_object(priv);
136 if (ret)
137 return ret;
138
139 priv->base.reset = nv50_gpio_reset;
140 priv->base.drive = nv50_gpio_drive;
141 priv->base.sense = nv50_gpio_sense;
142 priv->base.irq_enable = nv50_gpio_irq_enable;
143 nv_subdev(priv)->intr = nv50_gpio_intr;
144 return 0;
145}
146
147void
148nv50_gpio_dtor(struct nouveau_object *object)
149{
150 struct nv50_gpio_priv *priv = (void *)object;
151 nouveau_gpio_destroy(&priv->base);
152}
153
154int
155nv50_gpio_init(struct nouveau_object *object)
156{
157 struct nv50_gpio_priv *priv = (void *)object;
158 int ret;
159
160 ret = nouveau_gpio_init(&priv->base);
161 if (ret)
162 return ret;
163
164 /* disable, and ack any pending gpio interrupts */
165 nv_wr32(priv, 0xe050, 0x00000000);
166 nv_wr32(priv, 0xe054, 0xffffffff);
167 if (nv_device(priv)->chipset >= 0x90) {
168 nv_wr32(priv, 0xe070, 0x00000000);
169 nv_wr32(priv, 0xe074, 0xffffffff);
170 }
171
172 return 0;
173}
174
175int
176nv50_gpio_fini(struct nouveau_object *object, bool suspend)
177{
178 struct nv50_gpio_priv *priv = (void *)object;
179 nv_wr32(priv, 0xe050, 0x00000000);
180 if (nv_device(priv)->chipset >= 0x90)
181 nv_wr32(priv, 0xe070, 0x00000000);
182 return nouveau_gpio_fini(&priv->base, suspend);
183}
184
185struct nouveau_oclass
186nv50_gpio_oclass = {
187 .handle = NV_SUBDEV(GPIO, 0x50),
188 .ofuncs = &(struct nouveau_ofuncs) {
189 .ctor = nv50_gpio_ctor,
190 .dtor = nv50_gpio_dtor,
191 .init = nv50_gpio_init,
192 .fini = nv50_gpio_fini,
193 },
194};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
new file mode 100644
index 000000000000..8d18fcad26e0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -0,0 +1,104 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/gpio.h>
26
27struct nvd0_gpio_priv {
28 struct nouveau_gpio base;
29};
30
31static void
32nvd0_gpio_reset(struct nouveau_gpio *gpio)
33{
34 struct nouveau_bios *bios = nouveau_bios(gpio);
35 struct nvd0_gpio_priv *priv = (void *)gpio;
36 u16 entry;
37 u8 ver;
38 int ent = -1;
39
40 while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) {
41 u32 data = nv_ro32(bios, entry);
42 u8 line = (data & 0x0000003f);
43 u8 defs = !!(data & 0x00000080);
44 u8 func = (data & 0x0000ff00) >> 8;
45 u8 unk0 = (data & 0x00ff0000) >> 16;
46 u8 unk1 = (data & 0x1f000000) >> 24;
47
48 if (func == 0xff)
49 continue;
50
51 gpio->set(gpio, 0, func, line, defs);
52
53 nv_mask(priv, 0x00d610 + (line * 4), 0xff, unk0);
54 if (unk1--)
55 nv_mask(priv, 0x00d740 + (unk1 * 4), 0xff, line);
56 }
57}
58
59static int
60nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
61{
62 u32 data = ((dir ^ 1) << 13) | (out << 12);
63 nv_mask(gpio, 0x00d610 + (line * 4), 0x00003000, data);
64 nv_mask(gpio, 0x00d604, 0x00000001, 0x00000001); /* update? */
65 return 0;
66}
67
68static int
69nvd0_gpio_sense(struct nouveau_gpio *gpio, int line)
70{
71 return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
72}
73
74static int
75nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
76 struct nouveau_oclass *oclass, void *data, u32 size,
77 struct nouveau_object **pobject)
78{
79 struct nvd0_gpio_priv *priv;
80 int ret;
81
82 ret = nouveau_gpio_create(parent, engine, oclass, &priv);
83 *pobject = nv_object(priv);
84 if (ret)
85 return ret;
86
87 priv->base.reset = nvd0_gpio_reset;
88 priv->base.drive = nvd0_gpio_drive;
89 priv->base.sense = nvd0_gpio_sense;
90 priv->base.irq_enable = nv50_gpio_irq_enable;
91 nv_subdev(priv)->intr = nv50_gpio_intr;
92 return 0;
93}
94
95struct nouveau_oclass
96nvd0_gpio_oclass = {
97 .handle = NV_SUBDEV(GPIO, 0xd0),
98 .ofuncs = &(struct nouveau_ofuncs) {
99 .ctor = nvd0_gpio_ctor,
100 .dtor = nv50_gpio_dtor,
101 .init = nv50_gpio_init,
102 .fini = nv50_gpio_fini,
103 },
104};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
new file mode 100644
index 000000000000..fe1ebf199ba9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -0,0 +1,212 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/i2c.h>
26
27/******************************************************************************
28 * aux channel util functions
29 *****************************************************************************/
30#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
31#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
32
33static void
34auxch_fini(struct nouveau_i2c *aux, int ch)
35{
36 nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
37}
38
39static int
40auxch_init(struct nouveau_i2c *aux, int ch)
41{
42 const u32 unksel = 1; /* nfi which to use, or if it matters.. */
43 const u32 ureq = unksel ? 0x00100000 : 0x00200000;
44 const u32 urep = unksel ? 0x01000000 : 0x02000000;
45 u32 ctrl, timeout;
46
47 /* wait up to 1ms for any previous transaction to be done... */
48 timeout = 1000;
49 do {
50 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
51 udelay(1);
52 if (!timeout--) {
 53			AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
54 return -EBUSY;
55 }
56 } while (ctrl & 0x03010000);
57
58 /* set some magic, and wait up to 1ms for it to appear */
59 nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
60 timeout = 1000;
61 do {
62 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
63 udelay(1);
64 if (!timeout--) {
65 AUX_ERR("magic wait 0x%08x\n", ctrl);
66 auxch_fini(aux, ch);
67 return -EBUSY;
68 }
69 } while ((ctrl & 0x03000000) != urep);
70
71 return 0;
72}
73
74static int
75auxch_tx(struct nouveau_i2c *aux, int ch, u8 type, u32 addr, u8 *data, u8 size)
76{
77 u32 ctrl, stat, timeout, retries;
78 u32 xbuf[4] = {};
79 int ret, i;
80
81 AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
82
83 ret = auxch_init(aux, ch);
84 if (ret)
85 goto out;
86
87 stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
88 if (!(stat & 0x10000000)) {
89 AUX_DBG("sink not detected\n");
90 ret = -ENXIO;
91 goto out;
92 }
93
94 if (!(type & 1)) {
95 memcpy(xbuf, data, size);
96 for (i = 0; i < 16; i += 4) {
97 AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
98 nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
99 }
100 }
101
102 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
103 ctrl &= ~0x0001f0ff;
104 ctrl |= type << 12;
105 ctrl |= size - 1;
106 nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
107
108 /* retry transaction a number of times on failure... */
109 ret = -EREMOTEIO;
110 for (retries = 0; retries < 32; retries++) {
111 /* reset, and delay a while if this is a retry */
112 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
113 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
114 if (retries)
115 udelay(400);
116
117 /* transaction request, wait up to 1ms for it to complete */
118 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
119
120 timeout = 1000;
121 do {
122 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
123 udelay(1);
124 if (!timeout--) {
125 AUX_ERR("tx req timeout 0x%08x\n", ctrl);
126 goto out;
127 }
128 } while (ctrl & 0x00010000);
129
130 /* read status, and check if transaction completed ok */
131 stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
132 if (!(stat & 0x000f0f00)) {
133 ret = 0;
134 break;
135 }
136
137 AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
138 }
139
140 if (type & 1) {
141 for (i = 0; i < 16; i += 4) {
142 xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
143 AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
144 }
145 memcpy(data, xbuf, size);
146 }
147
148out:
149 auxch_fini(aux, ch);
150 return ret;
151}
152
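/* Native AUX requests: type 8 = write, type 9 = read (auxch_tx treats
 * bit 0 of the type as the data direction).
 */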
153int
154nv_rdaux(struct nouveau_i2c_port *auxch, u32 addr, u8 *data, u8 size)
155{
156 return auxch_tx(auxch->i2c, auxch->drive, 9, addr, data, size);
157}
158
159int
160nv_wraux(struct nouveau_i2c_port *auxch, u32 addr, u8 *data, u8 size)
161{
162 return auxch_tx(auxch->i2c, auxch->drive, 8, addr, data, size);
163}
164
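/* I2C-over-AUX: each i2c_msg is split into AUX transactions of at most
 * 16 bytes, with MOT (middle-of-transaction) set on every chunk except
 * the last of the final message, so the sink keeps the transfer open.
 */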
165static int
166aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
167{
168 struct nouveau_i2c_port *auxch = (struct nouveau_i2c_port *)adap;
169 struct i2c_msg *msg = msgs;
170 int ret, mcnt = num;
171
172 while (mcnt--) {
173 u8 remaining = msg->len;
174 u8 *ptr = msg->buf;
175
176 while (remaining) {
177 u8 cnt = (remaining > 16) ? 16 : remaining;
178 u8 cmd;
179
180 if (msg->flags & I2C_M_RD)
181 cmd = 1;
182 else
183 cmd = 0;
184
185 if (mcnt || remaining > 16)
186 cmd |= 4; /* MOT */
187
188 ret = auxch_tx(auxch->i2c, auxch->drive, cmd,
189 msg->addr, ptr, cnt);
190 if (ret < 0)
191 return ret;
192
193 ptr += cnt;
194 remaining -= cnt;
195 }
196
197 msg++;
198 }
199
200 return num;
201}
202
203static u32
204aux_func(struct i2c_adapter *adap)
205{
206 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
207}
208
209const struct i2c_algorithm nouveau_i2c_aux_algo = {
210 .master_xfer = aux_xfer,
211 .functionality = aux_func
212};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
new file mode 100644
index 000000000000..3d2c88310f98
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -0,0 +1,407 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "core/option.h"
26
27#include "subdev/i2c.h"
28#include "subdev/vga.h"
29
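/* Register-style device helpers: a read writes the one-byte register
 * index then reads one byte back; a write sends index then value.
 */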
30int
31nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
32{
33 u8 val;
34 struct i2c_msg msgs[] = {
35 { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
36 { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
37 };
38
39 int ret = i2c_transfer(&port->adapter, msgs, 2);
40 if (ret != 2)
41 return -EIO;
42
43 return val;
44}
45
46int
47nv_wri2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg, u8 val)
48{
49 struct i2c_msg msgs[] = {
50 { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
51 { .addr = addr, .flags = 0, .len = 1, .buf = &val },
52 };
53
54 int ret = i2c_transfer(&port->adapter, msgs, 2);
55 if (ret != 2)
56 return -EIO;
57
58 return 0;
59}
60
61bool
62nv_probe_i2c(struct nouveau_i2c_port *port, u8 addr)
63{
64 u8 buf[] = { 0 };
65 struct i2c_msg msgs[] = {
66 {
67 .addr = addr,
68 .flags = 0,
69 .len = 1,
70 .buf = buf,
71 },
72 {
73 .addr = addr,
74 .flags = I2C_M_RD,
75 .len = 1,
76 .buf = buf,
77 }
78 };
79
80 return i2c_transfer(&port->adapter, msgs, 2) == 2;
81}
82
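/* NV_I2C_DEFAULT(0/1) get resolved through the DCB i2c table: v3.0+
 * tables pack the two default bus indices into the byte at offset 4,
 * older tables fall back to bus 2.
 */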
83static struct nouveau_i2c_port *
84nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index)
85{
86 struct nouveau_bios *bios = nouveau_bios(i2c);
87 struct nouveau_i2c_port *port;
88
89 if (index == NV_I2C_DEFAULT(0) ||
90 index == NV_I2C_DEFAULT(1)) {
91 u8 ver, hdr, cnt, len;
92 u16 i2c = dcb_i2c_table(bios, &ver, &hdr, &cnt, &len);
93 if (i2c && ver >= 0x30) {
94 u8 auxidx = nv_ro08(bios, i2c + 4);
95 if (index == NV_I2C_DEFAULT(0))
96 index = (auxidx & 0x0f) >> 0;
97 else
98 index = (auxidx & 0xf0) >> 4;
99 } else {
100 index = 2;
101 }
102 }
103
104 list_for_each_entry(port, &i2c->ports, head) {
105 if (port->index == index)
106 break;
107 }
108
109 if (&port->head == &i2c->ports)
110 return NULL;
111
112 if (nv_device(i2c)->card_type >= NV_50 && (port->dcb & 0x00000100)) {
113 u32 reg = 0x00e500, val;
114 if (port->type == 6) {
115 reg += port->drive * 0x50;
116 val = 0x2002;
117 } else {
118 reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
119 val = 0xe001;
120 }
121
122		/* nfi, but neither auxch nor i2c work if it's 1 */
123 nv_mask(i2c, reg + 0x0c, 0x00000001, 0x00000000);
124 /* nfi, but switches auxch vs normal i2c */
125 nv_mask(i2c, reg + 0x00, 0x0000f003, val);
126 }
127
128 return port;
129}
130
131static int
132nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
133 struct i2c_board_info *info,
134 bool (*match)(struct nouveau_i2c_port *,
135 struct i2c_board_info *))
136{
137 struct nouveau_i2c_port *port = nouveau_i2c_find(i2c, index);
138 int i;
139
140 if (!port) {
141 nv_debug(i2c, "no bus when probing %s on %d\n", what, index);
142 return -ENODEV;
143 }
144
145 nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
146 for (i = 0; info[i].addr; i++) {
147 if (nv_probe_i2c(port, info[i].addr) &&
148 (!match || match(port, &info[i]))) {
149 nv_info(i2c, "detected %s: %s\n", what, info[i].type);
150 return i;
151 }
152 }
153
154 nv_debug(i2c, "no devices found.\n");
155 return -ENODEV;
156}
157
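/* Three bit-bang flavours: NV04 drives SCL/SDA through VGA CRTC regs
 * (bits 5/4), NV4E through a 32-bit register at 0x600800+ (bits 5/4,
 * bit 0 enable), and NVIO through per-port registers where bit 0 =
 * SCL, bit 1 = SDA and bit 2 presumably enables driving the pair.
 */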
158void
159nouveau_i2c_drive_scl(void *data, int state)
160{
161 struct nouveau_i2c_port *port = data;
162
163 if (port->type == DCB_I2C_NV04_BIT) {
164 u8 val = nv_rdvgac(port->i2c, 0, port->drive);
165 if (state) val |= 0x20;
166 else val &= 0xdf;
167 nv_wrvgac(port->i2c, 0, port->drive, val | 0x01);
168 } else
169 if (port->type == DCB_I2C_NV4E_BIT) {
170 nv_mask(port->i2c, port->drive, 0x2f, state ? 0x21 : 0x01);
171 } else
172 if (port->type == DCB_I2C_NVIO_BIT) {
173 if (state) port->state |= 0x01;
174 else port->state &= 0xfe;
175 nv_wr32(port->i2c, port->drive, 4 | port->state);
176 }
177}
178
179void
180nouveau_i2c_drive_sda(void *data, int state)
181{
182 struct nouveau_i2c_port *port = data;
183
184 if (port->type == DCB_I2C_NV04_BIT) {
185 u8 val = nv_rdvgac(port->i2c, 0, port->drive);
186 if (state) val |= 0x10;
187 else val &= 0xef;
188 nv_wrvgac(port->i2c, 0, port->drive, val | 0x01);
189 } else
190 if (port->type == DCB_I2C_NV4E_BIT) {
191 nv_mask(port->i2c, port->drive, 0x1f, state ? 0x11 : 0x01);
192 } else
193 if (port->type == DCB_I2C_NVIO_BIT) {
194 if (state) port->state |= 0x02;
195 else port->state &= 0xfd;
196 nv_wr32(port->i2c, port->drive, 4 | port->state);
197 }
198}
199
200int
201nouveau_i2c_sense_scl(void *data)
202{
203 struct nouveau_i2c_port *port = data;
204 struct nouveau_device *device = nv_device(port->i2c);
205
206 if (port->type == DCB_I2C_NV04_BIT) {
207 return !!(nv_rdvgac(port->i2c, 0, port->sense) & 0x04);
208 } else
209 if (port->type == DCB_I2C_NV4E_BIT) {
210 return !!(nv_rd32(port->i2c, port->sense) & 0x00040000);
211 } else
212 if (port->type == DCB_I2C_NVIO_BIT) {
213 if (device->card_type < NV_D0)
214 return !!(nv_rd32(port->i2c, port->sense) & 0x01);
215 else
216 return !!(nv_rd32(port->i2c, port->sense) & 0x10);
217 }
218
219 return 0;
220}
221
222int
223nouveau_i2c_sense_sda(void *data)
224{
225 struct nouveau_i2c_port *port = data;
226 struct nouveau_device *device = nv_device(port->i2c);
227
228 if (port->type == DCB_I2C_NV04_BIT) {
229 return !!(nv_rdvgac(port->i2c, 0, port->sense) & 0x08);
230 } else
231 if (port->type == DCB_I2C_NV4E_BIT) {
232 return !!(nv_rd32(port->i2c, port->sense) & 0x00080000);
233 } else
234 if (port->type == DCB_I2C_NVIO_BIT) {
235 if (device->card_type < NV_D0)
236 return !!(nv_rd32(port->i2c, port->sense) & 0x02);
237 else
238 return !!(nv_rd32(port->i2c, port->sense) & 0x20);
239 }
240
241 return 0;
242}
243
244static const u32 nv50_i2c_port[] = {
245 0x00e138, 0x00e150, 0x00e168, 0x00e180,
246 0x00e254, 0x00e274, 0x00e764, 0x00e780,
247 0x00e79c, 0x00e7b8
248};
249
250static int
251nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
252 struct nouveau_oclass *oclass, void *data, u32 size,
253 struct nouveau_object **pobject)
254{
255 struct nouveau_device *device = nv_device(parent);
256 struct nouveau_bios *bios = nouveau_bios(parent);
257 struct nouveau_i2c_port *port;
258 struct nouveau_i2c *i2c;
259 struct dcb_i2c_entry info;
260 int ret, i = -1;
261
262 ret = nouveau_subdev_create(parent, engine, oclass, 0,
263 "I2C", "i2c", &i2c);
264 *pobject = nv_object(i2c);
265 if (ret)
266 return ret;
267
268 i2c->find = nouveau_i2c_find;
269 i2c->identify = nouveau_i2c_identify;
270 INIT_LIST_HEAD(&i2c->ports);
271
272 while (!dcb_i2c_parse(bios, ++i, &info)) {
273 if (info.type == DCB_I2C_UNUSED)
274 continue;
275
276 port = kzalloc(sizeof(*port), GFP_KERNEL);
277 if (!port) {
278 nv_error(i2c, "failed port memory alloc at %d\n", i);
279 break;
280 }
281
282 port->type = info.type;
283 switch (port->type) {
284 case DCB_I2C_NV04_BIT:
285 port->drive = info.drive;
286 port->sense = info.sense;
287 break;
288 case DCB_I2C_NV4E_BIT:
289 port->drive = 0x600800 + info.drive;
290 port->sense = port->drive;
291 break;
292 case DCB_I2C_NVIO_BIT:
293 port->drive = info.drive & 0x0f;
294 if (device->card_type < NV_D0) {
295 if (info.drive >= ARRAY_SIZE(nv50_i2c_port))
296 break;
297 port->drive = nv50_i2c_port[port->drive];
298 port->sense = port->drive;
299 } else {
300 port->drive = 0x00d014 + (port->drive * 0x20);
301 port->sense = port->drive;
302 }
303 break;
304 case DCB_I2C_NVIO_AUX:
305 port->drive = info.drive & 0x0f;
306 port->sense = port->drive;
307 port->adapter.algo = &nouveau_i2c_aux_algo;
308 break;
309 default:
310 break;
311 }
312
313 if (!port->adapter.algo && !port->drive) {
314 nv_error(i2c, "I2C%d: type %d index %x/%x unknown\n",
315 i, port->type, port->drive, port->sense);
316 kfree(port);
317 continue;
318 }
319
320 snprintf(port->adapter.name, sizeof(port->adapter.name),
321 "nouveau-%s-%d", device->name, i);
322 port->adapter.owner = THIS_MODULE;
323 port->adapter.dev.parent = &device->pdev->dev;
324 port->i2c = i2c;
325 port->index = i;
326 port->dcb = info.data;
327 i2c_set_adapdata(&port->adapter, i2c);
328
329 if (port->adapter.algo != &nouveau_i2c_aux_algo) {
330 nouveau_i2c_drive_scl(port, 0);
331 nouveau_i2c_drive_sda(port, 1);
332 nouveau_i2c_drive_scl(port, 1);
333
334#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
335 if (nouveau_boolopt(device->cfgopt, "NvI2C", true)) {
336#else
337 if (nouveau_boolopt(device->cfgopt, "NvI2C", false)) {
338#endif
339 port->adapter.algo = &nouveau_i2c_bit_algo;
340 ret = i2c_add_adapter(&port->adapter);
341 } else {
342 port->adapter.algo_data = &port->bit;
343 port->bit.udelay = 10;
344 port->bit.timeout = usecs_to_jiffies(2200);
345 port->bit.data = port;
346 port->bit.setsda = nouveau_i2c_drive_sda;
347 port->bit.setscl = nouveau_i2c_drive_scl;
348 port->bit.getsda = nouveau_i2c_sense_sda;
349 port->bit.getscl = nouveau_i2c_sense_scl;
350 ret = i2c_bit_add_bus(&port->adapter);
351 }
352 } else {
353 port->adapter.algo = &nouveau_i2c_aux_algo;
354 ret = i2c_add_adapter(&port->adapter);
355 }
356
357 if (ret) {
358 nv_error(i2c, "I2C%d: failed register: %d\n", i, ret);
359 kfree(port);
360 continue;
361 }
362
363 list_add_tail(&port->head, &i2c->ports);
364 }
365
366 return 0;
367}
368
369static void
370nouveau_i2c_dtor(struct nouveau_object *object)
371{
372 struct nouveau_i2c *i2c = (void *)object;
373 struct nouveau_i2c_port *port, *temp;
374
375 list_for_each_entry_safe(port, temp, &i2c->ports, head) {
376 i2c_del_adapter(&port->adapter);
377 list_del(&port->head);
378 kfree(port);
379 }
380
381 nouveau_subdev_destroy(&i2c->base);
382}
383
384static int
385nouveau_i2c_init(struct nouveau_object *object)
386{
387 struct nouveau_i2c *i2c = (void *)object;
388 return nouveau_subdev_init(&i2c->base);
389}
390
391static int
392nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
393{
394 struct nouveau_i2c *i2c = (void *)object;
395 return nouveau_subdev_fini(&i2c->base, suspend);
396}
397
398struct nouveau_oclass
399nouveau_i2c_oclass = {
400 .handle = NV_SUBDEV(I2C, 0x00),
401 .ofuncs = &(struct nouveau_ofuncs) {
402 .ctor = nouveau_i2c_ctor,
403 .dtor = nouveau_i2c_dtor,
404 .init = nouveau_i2c_init,
405 .fini = nouveau_i2c_fini,
406 },
407};
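
A hedged sketch of how a consumer might use this subdev: look a port up via the find() hook installed in nouveau_i2c_ctor() and hand its embedded adapter to the core I2C layer. The port index 0 and DDC address 0x50 below are illustrative assumptions, not from this patch:

static int
example_read_ddc_byte(struct nouveau_i2c *i2c, u8 *byte)
{
	struct nouveau_i2c_port *port = i2c->find(i2c, 0);
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = byte },
	};

	if (!port)
		return -ENODEV;
	/* adapter was registered with i2c_add_adapter()/i2c_bit_add_bus() */
	return i2c_transfer(&port->adapter, msgs, 2) == 2 ? 0 : -EIO;
}
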
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
new file mode 100644
index 000000000000..1c4c9a5c8e2e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
@@ -0,0 +1,230 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "subdev/i2c.h"
26
27#ifdef CONFIG_NOUVEAU_I2C_INTERNAL
28#define T_TIMEOUT 2200000
29#define T_RISEFALL 1000
30#define T_HOLD 5000
31
32static inline void
33i2c_drive_scl(struct nouveau_i2c_port *port, int state)
34{
35 nouveau_i2c_drive_scl(port, state);
36}
37
38static inline void
39i2c_drive_sda(struct nouveau_i2c_port *port, int state)
40{
41 nouveau_i2c_drive_sda(port, state);
42}
43
44static inline int
45i2c_sense_scl(struct nouveau_i2c_port *port)
46{
47 return nouveau_i2c_sense_scl(port);
48}
49
50static inline int
51i2c_sense_sda(struct nouveau_i2c_port *port)
52{
53 return nouveau_i2c_sense_sda(port);
54}
55
56static void
57i2c_delay(struct nouveau_i2c_port *port, u32 nsec)
58{
59 udelay((nsec + 500) / 1000);
60}
61
62static bool
63i2c_raise_scl(struct nouveau_i2c_port *port)
64{
65 u32 timeout = T_TIMEOUT / T_RISEFALL;
66
67 i2c_drive_scl(port, 1);
68 do {
69 i2c_delay(port, T_RISEFALL);
70 } while (!i2c_sense_scl(port) && --timeout);
71
72 return timeout != 0;
73}
74
75static int
76i2c_start(struct nouveau_i2c_port *port)
77{
78 int ret = 0;
79
80 port->state = i2c_sense_scl(port);
81 port->state |= i2c_sense_sda(port) << 1;
82 if (port->state != 3) {
83 i2c_drive_scl(port, 0);
84 i2c_drive_sda(port, 1);
85 if (!i2c_raise_scl(port))
86 ret = -EBUSY;
87 }
88
89 i2c_drive_sda(port, 0);
90 i2c_delay(port, T_HOLD);
91 i2c_drive_scl(port, 0);
92 i2c_delay(port, T_HOLD);
93 return ret;
94}
95
96static void
97i2c_stop(struct nouveau_i2c_port *port)
98{
99 i2c_drive_scl(port, 0);
100 i2c_drive_sda(port, 0);
101 i2c_delay(port, T_RISEFALL);
102
103 i2c_drive_scl(port, 1);
104 i2c_delay(port, T_HOLD);
105 i2c_drive_sda(port, 1);
106 i2c_delay(port, T_HOLD);
107}
108
109static int
110i2c_bitw(struct nouveau_i2c_port *port, int sda)
111{
112 i2c_drive_sda(port, sda);
113 i2c_delay(port, T_RISEFALL);
114
115 if (!i2c_raise_scl(port))
116 return -ETIMEDOUT;
117 i2c_delay(port, T_HOLD);
118
119 i2c_drive_scl(port, 0);
120 i2c_delay(port, T_HOLD);
121 return 0;
122}
123
124static int
125i2c_bitr(struct nouveau_i2c_port *port)
126{
127 int sda;
128
129 i2c_drive_sda(port, 1);
130 i2c_delay(port, T_RISEFALL);
131
132 if (!i2c_raise_scl(port))
133 return -ETIMEDOUT;
134 i2c_delay(port, T_HOLD);
135
136 sda = i2c_sense_sda(port);
137
138 i2c_drive_scl(port, 0);
139 i2c_delay(port, T_HOLD);
140 return sda;
141}
142
143static int
144i2c_get_byte(struct nouveau_i2c_port *port, u8 *byte, bool last)
145{
146 int i, bit;
147
148 *byte = 0;
149 for (i = 7; i >= 0; i--) {
150 bit = i2c_bitr(port);
151 if (bit < 0)
152 return bit;
153 *byte |= bit << i;
154 }
155
156 return i2c_bitw(port, last ? 1 : 0);
157}
158
159static int
160i2c_put_byte(struct nouveau_i2c_port *port, u8 byte)
161{
162 int i, ret;
163 for (i = 7; i >= 0; i--) {
164 ret = i2c_bitw(port, !!(byte & (1 << i)));
165 if (ret < 0)
166 return ret;
167 }
168
169 ret = i2c_bitr(port);
170 if (ret == 1) /* nack */
171 ret = -EIO;
172 return ret;
173}
174
175static int
176i2c_addr(struct nouveau_i2c_port *port, struct i2c_msg *msg)
177{
178 u32 addr = msg->addr << 1;
179 if (msg->flags & I2C_M_RD)
180 addr |= 1;
181 return i2c_put_byte(port, addr);
182}
183
184static int
185i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
186{
187 struct nouveau_i2c_port *port = (struct nouveau_i2c_port *)adap;
188 struct i2c_msg *msg = msgs;
189 int ret = 0, mcnt = num;
190
191 while (!ret && mcnt--) {
192 u8 remaining = msg->len;
193 u8 *ptr = msg->buf;
194
195 ret = i2c_start(port);
196 if (ret == 0)
197 ret = i2c_addr(port, msg);
198
199 if (msg->flags & I2C_M_RD) {
200 while (!ret && remaining--)
201 ret = i2c_get_byte(port, ptr++, !remaining);
202 } else {
203 while (!ret && remaining--)
204 ret = i2c_put_byte(port, *ptr++);
205 }
206
207 msg++;
208 }
209
210 i2c_stop(port);
211 return (ret < 0) ? ret : num;
212}
213#else
214static int
215i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
216{
217 return -ENODEV;
218}
219#endif
220
221static u32
222i2c_bit_func(struct i2c_adapter *adap)
223{
224 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
225}
226
227const struct i2c_algorithm nouveau_i2c_bit_algo = {
228 .master_xfer = i2c_bit_xfer,
229 .functionality = i2c_bit_func
230};
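
The timing constants at the top of this file are in nanoseconds: i2c_delay() rounds them to the microsecond granularity of udelay(), and i2c_raise_scl() tolerates clock-stretching slaves by polling SCL at most T_TIMEOUT / T_RISEFALL times. The same arithmetic in isolation (a standalone illustration, not driver code):

/* 2.2 ms clock-stretch budget, polled in 1 us steps */
static unsigned int
example_stretch_polls(void)
{
	/* udelay((1000 + 500) / 1000) == udelay(1) per iteration, so the
	 * SCL rise loop spins up to 2200000 / 1000 = 2200 times (~2.2 ms) */
	return 2200000 / 1000;
}
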
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c
new file mode 100644
index 000000000000..4e977ff27e44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ibus/nvc0.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/ibus.h>
26
27struct nvc0_ibus_priv {
28 struct nouveau_ibus base;
29};
30
31static void
32nvc0_ibus_intr_hub(struct nvc0_ibus_priv *priv, int i)
33{
34 u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0400));
35 u32 data = nv_rd32(priv, 0x122124 + (i * 0x0400));
36 u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0400));
37 nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
38 nv_mask(priv, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
39}
40
41static void
42nvc0_ibus_intr_rop(struct nvc0_ibus_priv *priv, int i)
43{
44 u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0400));
45 u32 data = nv_rd32(priv, 0x124124 + (i * 0x0400));
46 u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0400));
47 nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
48 nv_mask(priv, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
49}
50
51static void
52nvc0_ibus_intr_gpc(struct nvc0_ibus_priv *priv, int i)
53{
54 u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0400));
55 u32 data = nv_rd32(priv, 0x128124 + (i * 0x0400));
56 u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0400));
57 nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
58 nv_mask(priv, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
59}
60
61static void
62nvc0_ibus_intr(struct nouveau_subdev *subdev)
63{
64 struct nvc0_ibus_priv *priv = (void *)subdev;
65 u32 intr0 = nv_rd32(priv, 0x121c58);
66 u32 intr1 = nv_rd32(priv, 0x121c5c);
67 u32 hubnr = nv_rd32(priv, 0x121c70);
68 u32 ropnr = nv_rd32(priv, 0x121c74);
69 u32 gpcnr = nv_rd32(priv, 0x121c78);
70 u32 i;
71
72 for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
73 u32 stat = 0x00000100 << i;
74 if (intr0 & stat) {
75 nvc0_ibus_intr_hub(priv, i);
76 intr0 &= ~stat;
77 }
78 }
79
80 for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
81 u32 stat = 0x00010000 << i;
82 if (intr0 & stat) {
83 nvc0_ibus_intr_rop(priv, i);
84 intr0 &= ~stat;
85 }
86 }
87
88 for (i = 0; intr1 && i < gpcnr; i++) {
89 u32 stat = 0x00000001 << i;
90 if (intr1 & stat) {
91 nvc0_ibus_intr_gpc(priv, i);
92 intr1 &= ~stat;
93 }
94 }
95}
96
97static int
98nvc0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
99 struct nouveau_oclass *oclass, void *data, u32 size,
100 struct nouveau_object **pobject)
101{
102 struct nvc0_ibus_priv *priv;
103 int ret;
104
105 ret = nouveau_ibus_create(parent, engine, oclass, &priv);
106 *pobject = nv_object(priv);
107 if (ret)
108 return ret;
109
110 nv_subdev(priv)->intr = nvc0_ibus_intr;
111 return 0;
112}
113
114struct nouveau_oclass
115nvc0_ibus_oclass = {
116 .handle = NV_SUBDEV(IBUS, 0xc0),
117 .ofuncs = &(struct nouveau_ofuncs) {
118 .ctor = nvc0_ibus_ctor,
119 .dtor = _nouveau_ibus_dtor,
120 .init = _nouveau_ibus_init,
121 .fini = _nouveau_ibus_fini,
122 },
123};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
new file mode 100644
index 000000000000..7120124dceac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
@@ -0,0 +1,123 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/ibus.h>
26
27struct nve0_ibus_priv {
28 struct nouveau_ibus base;
29};
30
31static void
32nve0_ibus_intr_hub(struct nve0_ibus_priv *priv, int i)
33{
34 u32 addr = nv_rd32(priv, 0x122120 + (i * 0x0800));
35 u32 data = nv_rd32(priv, 0x122124 + (i * 0x0800));
36 u32 stat = nv_rd32(priv, 0x122128 + (i * 0x0800));
37 nv_error(priv, "HUB%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
38 nv_mask(priv, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
39}
40
41static void
42nve0_ibus_intr_rop(struct nve0_ibus_priv *priv, int i)
43{
44 u32 addr = nv_rd32(priv, 0x124120 + (i * 0x0800));
45 u32 data = nv_rd32(priv, 0x124124 + (i * 0x0800));
46 u32 stat = nv_rd32(priv, 0x124128 + (i * 0x0800));
47 nv_error(priv, "ROP%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
48 nv_mask(priv, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
49}
50
51static void
52nve0_ibus_intr_gpc(struct nve0_ibus_priv *priv, int i)
53{
54 u32 addr = nv_rd32(priv, 0x128120 + (i * 0x0800));
55 u32 data = nv_rd32(priv, 0x128124 + (i * 0x0800));
56 u32 stat = nv_rd32(priv, 0x128128 + (i * 0x0800));
57 nv_error(priv, "GPC%d: 0x%06x 0x%08x (0x%08x)\n", i, addr, data, stat);
58 nv_mask(priv, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
59}
60
61static void
62nve0_ibus_intr(struct nouveau_subdev *subdev)
63{
64 struct nve0_ibus_priv *priv = (void *)subdev;
65 u32 intr0 = nv_rd32(priv, 0x120058);
66 u32 intr1 = nv_rd32(priv, 0x12005c);
67 u32 hubnr = nv_rd32(priv, 0x120070);
68 u32 ropnr = nv_rd32(priv, 0x120074);
69 u32 gpcnr = nv_rd32(priv, 0x120078);
70 u32 i;
71
72 for (i = 0; (intr0 & 0x0000ff00) && i < hubnr; i++) {
73 u32 stat = 0x00000100 << i;
74 if (intr0 & stat) {
75 nve0_ibus_intr_hub(priv, i);
76 intr0 &= ~stat;
77 }
78 }
79
80 for (i = 0; (intr0 & 0xffff0000) && i < ropnr; i++) {
81 u32 stat = 0x00010000 << i;
82 if (intr0 & stat) {
83 nve0_ibus_intr_rop(priv, i);
84 intr0 &= ~stat;
85 }
86 }
87
88 for (i = 0; intr1 && i < gpcnr; i++) {
89 u32 stat = 0x00000001 << i;
90 if (intr1 & stat) {
91 nve0_ibus_intr_gpc(priv, i);
92 intr1 &= ~stat;
93 }
94 }
95}
96
97static int
98nve0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
99 struct nouveau_oclass *oclass, void *data, u32 size,
100 struct nouveau_object **pobject)
101{
102 struct nve0_ibus_priv *priv;
103 int ret;
104
105 ret = nouveau_ibus_create(parent, engine, oclass, &priv);
106 *pobject = nv_object(priv);
107 if (ret)
108 return ret;
109
110 nv_subdev(priv)->intr = nve0_ibus_intr;
111 return 0;
112}
113
114struct nouveau_oclass
115nve0_ibus_oclass = {
116 .handle = NV_SUBDEV(IBUS, 0xe0),
117 .ofuncs = &(struct nouveau_ofuncs) {
118 .ctor = nve0_ibus_ctor,
119 .dtor = _nouveau_ibus_dtor,
120 .init = _nouveau_ibus_init,
121 .fini = _nouveau_ibus_fini,
122 },
123};
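
nve0.c mirrors nvc0.c with two differences: the per-unit register stride doubles (0x0800 instead of 0x0400), and the top-level status/count registers move from 0x121c5x/0x121c7x down to 0x12005x/0x12007x. A hypothetical helper showing the address computation both copies share (the name and parameters are illustrative only):

static u32
example_ibus_unit_reg(u32 unit_base, u32 stride, int i, u32 reg)
{
	/* unit_base: 0x122000 HUB, 0x124000 ROP, 0x128000 GPC;
	 * stride: 0x400 on nvc0-class, 0x800 on nve0-class;
	 * reg: 0x120 addr, 0x124 data, 0x128 status */
	return unit_base + reg + (i * stride);
}
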
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
new file mode 100644
index 000000000000..1188227ca6aa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/instmem.h>
26
27int
28nouveau_instobj_create_(struct nouveau_object *parent,
29 struct nouveau_object *engine,
30 struct nouveau_oclass *oclass,
31 int length, void **pobject)
32{
33 struct nouveau_instmem *imem = (void *)engine;
34 struct nouveau_instobj *iobj;
35 int ret;
36
37 ret = nouveau_object_create_(parent, engine, oclass, NV_MEMOBJ_CLASS,
38 length, pobject);
39 iobj = *pobject;
40 if (ret)
41 return ret;
42
43 list_add(&iobj->head, &imem->list);
44 return 0;
45}
46
47void
48nouveau_instobj_destroy(struct nouveau_instobj *iobj)
49{
50 if (iobj->head.prev)
51 list_del(&iobj->head);
52 return nouveau_object_destroy(&iobj->base);
53}
54
55void
56_nouveau_instobj_dtor(struct nouveau_object *object)
57{
58 struct nouveau_instobj *iobj = (void *)object;
59 return nouveau_instobj_destroy(iobj);
60}
61
62int
63nouveau_instmem_create_(struct nouveau_object *parent,
64 struct nouveau_object *engine,
65 struct nouveau_oclass *oclass,
66 int length, void **pobject)
67{
68 struct nouveau_instmem *imem;
69 int ret;
70
71 ret = nouveau_subdev_create_(parent, engine, oclass, 0,
72 "INSTMEM", "instmem", length, pobject);
73 imem = *pobject;
74 if (ret)
75 return ret;
76
77 INIT_LIST_HEAD(&imem->list);
78 return 0;
79}
80
81int
82nouveau_instmem_init(struct nouveau_instmem *imem)
83{
84 struct nouveau_instobj *iobj;
85 int ret, i;
86
87 ret = nouveau_subdev_init(&imem->base);
88 if (ret)
89 return ret;
90
91 list_for_each_entry(iobj, &imem->list, head) {
92 if (iobj->suspend) {
93 for (i = 0; i < iobj->size; i += 4)
94 nv_wo32(iobj, i, iobj->suspend[i / 4]);
95 vfree(iobj->suspend);
96 iobj->suspend = NULL;
97 }
98 }
99
100 return 0;
101}
102
103int
104nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
105{
106 struct nouveau_instobj *iobj;
107 int i;
108
109 if (suspend) {
110 list_for_each_entry(iobj, &imem->list, head) {
111 iobj->suspend = vmalloc(iobj->size);
112 if (iobj->suspend) {
113 for (i = 0; i < iobj->size; i += 4)
114 iobj->suspend[i / 4] = nv_ro32(iobj, i);
115 } else
116 return -ENOMEM;
117 }
118 }
119
120 return nouveau_subdev_fini(&imem->base, suspend);
121}
122
123int
124_nouveau_instmem_init(struct nouveau_object *object)
125{
126 struct nouveau_instmem *imem = (void *)object;
127 return nouveau_instmem_init(imem);
128}
129
130int
131_nouveau_instmem_fini(struct nouveau_object *object, bool suspend)
132{
133 struct nouveau_instmem *imem = (void *)object;
134 return nouveau_instmem_fini(imem, suspend);
135}
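
nouveau_instmem_fini()/init() above implement a simple snapshot-and-restore protocol: on suspend each instobj is copied word by word into a vmalloc()'d shadow, and on resume the shadow is written back and freed. The pattern reduced to its essentials (a sketch with an illustrative name, not from this patch):

static u32 *
example_snapshot(struct nouveau_instobj *iobj)
{
	u32 *shadow = vmalloc(iobj->size);
	int i;

	if (shadow) {
		for (i = 0; i < iobj->size; i += 4)
			shadow[i / 4] = nv_ro32(iobj, i);
	}
	return shadow;	/* restore via nv_wo32() in the same loop shape */
}
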
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
new file mode 100644
index 000000000000..ba4d28b50368
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -0,0 +1,198 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/fb.h>
26
27#include "nv04.h"
28
29static int
30nv04_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
31 struct nouveau_oclass *oclass, void *data, u32 size,
32 struct nouveau_object **pobject)
33{
34 struct nv04_instmem_priv *priv = (void *)engine;
35 struct nv04_instobj_priv *node;
36 int ret, align;
37
38 align = (unsigned long)data;
39 if (!align)
40 align = 1;
41
42 ret = nouveau_instobj_create(parent, engine, oclass, &node);
43 *pobject = nv_object(node);
44 if (ret)
45 return ret;
46
47 ret = nouveau_mm_head(&priv->heap, 1, size, size, align, &node->mem);
48 if (ret)
49 return ret;
50
51 node->base.addr = node->mem->offset;
52 node->base.size = node->mem->length;
53 return 0;
54}
55
56static void
57nv04_instobj_dtor(struct nouveau_object *object)
58{
59 struct nv04_instmem_priv *priv = (void *)object->engine;
60 struct nv04_instobj_priv *node = (void *)object;
61 nouveau_mm_free(&priv->heap, &node->mem);
62 nouveau_instobj_destroy(&node->base);
63}
64
65static u32
66nv04_instobj_rd32(struct nouveau_object *object, u32 addr)
67{
68 struct nv04_instobj_priv *node = (void *)object;
69 return nv_ro32(object->engine, node->mem->offset + addr);
70}
71
72static void
73nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
74{
75 struct nv04_instobj_priv *node = (void *)object;
76 nv_wo32(object->engine, node->mem->offset + addr, data);
77}
78
79static struct nouveau_oclass
80nv04_instobj_oclass = {
81 .ofuncs = &(struct nouveau_ofuncs) {
82 .ctor = nv04_instobj_ctor,
83 .dtor = nv04_instobj_dtor,
84 .init = _nouveau_instobj_init,
85 .fini = _nouveau_instobj_fini,
86 .rd32 = nv04_instobj_rd32,
87 .wr32 = nv04_instobj_wr32,
88 },
89};
90
91int
92nv04_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
93 u32 size, u32 align, struct nouveau_object **pobject)
94{
95 struct nouveau_object *engine = nv_object(imem);
96 struct nv04_instmem_priv *priv = (void *)(imem);
97 int ret;
98
99 ret = nouveau_object_ctor(parent, engine, &nv04_instobj_oclass,
100 (void *)(unsigned long)align, size, pobject);
101 if (ret)
102 return ret;
103
104	/* INSTMEM itself creates objects to reserve (and preserve across
105	 * suspend/resume) various fixed data locations; each of these takes
106	 * a reference on INSTMEM itself, which would otherwise prevent it
107	 * from ever being freed.  Drop the self-references here to avoid that.
108	 */
109 if (unlikely(!priv->created))
110 atomic_dec(&engine->refcount);
111
112 return 0;
113}
114
115static int
116nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
117 struct nouveau_oclass *oclass, void *data, u32 size,
118 struct nouveau_object **pobject)
119{
120 struct nv04_instmem_priv *priv;
121 int ret;
122
123 ret = nouveau_instmem_create(parent, engine, oclass, &priv);
124 *pobject = nv_object(priv);
125 if (ret)
126 return ret;
127
128 /* PRAMIN aperture maps over the end of VRAM, reserve it */
129 priv->base.reserved = 512 * 1024;
130 priv->base.alloc = nv04_instmem_alloc;
131
132 ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
133 if (ret)
134 return ret;
135
136 /* 0x00000-0x10000: reserve for probable vbios image */
137 ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
138 if (ret)
139 return ret;
140
141 /* 0x10000-0x18000: reserve for RAMHT */
142 ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
143 if (ret)
144 return ret;
145
146 /* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
147 ret = nouveau_gpuobj_new(parent, NULL, 0x00800, 0,
148 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
149 if (ret)
150 return ret;
151
152 /* 0x18800-0x18a00: reserve for RAMRO */
153 ret = nouveau_gpuobj_new(parent, NULL, 0x00200, 0, 0, &priv->ramro);
154 if (ret)
155 return ret;
156
157 priv->created = true;
158 return 0;
159}
160
161void
162nv04_instmem_dtor(struct nouveau_object *object)
163{
164 struct nv04_instmem_priv *priv = (void *)object;
165 nouveau_gpuobj_ref(NULL, &priv->ramfc);
166 nouveau_gpuobj_ref(NULL, &priv->ramro);
167 nouveau_ramht_ref(NULL, &priv->ramht);
168 nouveau_gpuobj_ref(NULL, &priv->vbios);
169 nouveau_mm_fini(&priv->heap);
170 if (priv->iomem)
171 iounmap(priv->iomem);
172 nouveau_instmem_destroy(&priv->base);
173}
174
175static u32
176nv04_instmem_rd32(struct nouveau_object *object, u32 addr)
177{
178 return nv_rd32(object, 0x700000 + addr);
179}
180
181static void
182nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
183{
184 return nv_wr32(object, 0x700000 + addr, data);
185}
186
187struct nouveau_oclass
188nv04_instmem_oclass = {
189 .handle = NV_SUBDEV(INSTMEM, 0x04),
190 .ofuncs = &(struct nouveau_ofuncs) {
191 .ctor = nv04_instmem_ctor,
192 .dtor = nv04_instmem_dtor,
193 .init = _nouveau_instmem_init,
194 .fini = _nouveau_instmem_fini,
195 .rd32 = nv04_instmem_rd32,
196 .wr32 = nv04_instmem_wr32,
197 },
198};
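
The carve-out built by nv04_instmem_ctor() is allocated back to back from offset zero of the 512 KiB reservation; spelled out as constants (a reading aid only, these macros do not exist in the patch):

#define NV04_PRAMIN_VBIOS 0x00000	/* 0x10000 bytes */
#define NV04_PRAMIN_RAMHT 0x10000	/* 0x08000 bytes */
#define NV04_PRAMIN_RAMFC 0x18000	/* 0x00800 bytes, 32 nv30 channels */
#define NV04_PRAMIN_RAMRO 0x18800	/* 0x00200 bytes */
#define NV04_PRAMIN_END   0x18a00	/* well under the 512 KiB reserved */
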
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
new file mode 100644
index 000000000000..7983d8d9b358
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
@@ -0,0 +1,39 @@
1#ifndef __NV04_INSTMEM_H__
2#define __NV04_INSTMEM_H__
3
4#include <core/gpuobj.h>
5#include <core/ramht.h>
6#include <core/mm.h>
7
8#include <subdev/instmem.h>
9
10struct nv04_instmem_priv {
11 struct nouveau_instmem base;
12 bool created;
13
14 void __iomem *iomem;
15 struct nouveau_mm heap;
16
17 struct nouveau_gpuobj *vbios;
18 struct nouveau_ramht *ramht;
19 struct nouveau_gpuobj *ramro;
20 struct nouveau_gpuobj *ramfc;
21};
22
23static inline struct nv04_instmem_priv *
24nv04_instmem(void *obj)
25{
26 return (void *)nouveau_instmem(obj);
27}
28
29struct nv04_instobj_priv {
30 struct nouveau_instobj base;
31 struct nouveau_mm_node *mem;
32};
33
34void nv04_instmem_dtor(struct nouveau_object *);
35
36int nv04_instmem_alloc(struct nouveau_instmem *, struct nouveau_object *,
37 u32 size, u32 align, struct nouveau_object **pobject);
38
39#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
new file mode 100644
index 000000000000..73c52ebd5932
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -0,0 +1,138 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv04.h"
26
27static inline int
28nv44_graph_class(struct nv04_instmem_priv *priv)
29{
30 if ((nv_device(priv)->chipset & 0xf0) == 0x60)
31 return 1;
32 return !(0x0baf & (1 << (nv_device(priv)->chipset & 0x0f)));
33}
34
35static int
36nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
37 struct nouveau_oclass *oclass, void *data, u32 size,
38 struct nouveau_object **pobject)
39{
40 struct nouveau_device *device = nv_device(parent);
41 struct pci_dev *pdev = device->pdev;
42 struct nv04_instmem_priv *priv;
43 int ret, bar, vs;
44
45 ret = nouveau_instmem_create(parent, engine, oclass, &priv);
46 *pobject = nv_object(priv);
47 if (ret)
48 return ret;
49
50 /* map bar */
51 if (pci_resource_len(pdev, 2))
52 bar = 2;
53 else
54 bar = 3;
55
56 priv->iomem = ioremap(pci_resource_start(pdev, bar),
57 pci_resource_len(pdev, bar));
58 if (!priv->iomem) {
59 nv_error(priv, "unable to map PRAMIN BAR\n");
60 return -EFAULT;
61 }
62
63	/* PRAMIN aperture maps over the end of VRAM; reserve enough space
64	 * to fit graphics contexts for every channel.  The magic numbers
65	 * come from engine/graph/nv40.c.
66	 */
67 vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
68 if (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
69 else if (device->chipset < 0x43) priv->base.reserved = 0x4f00 * vs;
70 else if (nv44_graph_class(priv)) priv->base.reserved = 0x4980 * vs;
71 else priv->base.reserved = 0x4a40 * vs;
72 priv->base.reserved += 16 * 1024;
73 priv->base.reserved *= 32; /* per-channel */
74 priv->base.reserved += 512 * 1024; /* pci(e)gart table */
75 priv->base.reserved += 512 * 1024; /* object storage */
76
77 priv->base.reserved = round_up(priv->base.reserved, 4096);
78 priv->base.alloc = nv04_instmem_alloc;
79
80 ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
81 if (ret)
82 return ret;
83
84 /* 0x00000-0x10000: reserve for probable vbios image */
85 ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0, 0, &priv->vbios);
86 if (ret)
87 return ret;
88
89 /* 0x10000-0x18000: reserve for RAMHT */
90 ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
91 if (ret)
92 return ret;
93
94 /* 0x18000-0x18200: reserve for RAMRO
95 * 0x18200-0x20000: padding
96 */
97 ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0, 0, &priv->ramro);
98 if (ret)
99 return ret;
100
101 /* 0x20000-0x21000: reserve for RAMFC
102	 * 0x21000-0x40000: padding and some as-yet-unidentified data
103 */
104 ret = nouveau_gpuobj_new(parent, NULL, 0x20000, 0,
105 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
106 if (ret)
107 return ret;
108
109 priv->created = true;
110 return 0;
111}
112
113static u32
114nv40_instmem_rd32(struct nouveau_object *object, u32 addr)
115{
116 struct nv04_instmem_priv *priv = (void *)object;
117 return ioread32_native(priv->iomem + addr);
118}
119
120static void
121nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
122{
123 struct nv04_instmem_priv *priv = (void *)object;
124 iowrite32_native(data, priv->iomem + addr);
125}
126
127struct nouveau_oclass
128nv40_instmem_oclass = {
129 .handle = NV_SUBDEV(INSTMEM, 0x40),
130 .ofuncs = &(struct nouveau_ofuncs) {
131 .ctor = nv40_instmem_ctor,
132 .dtor = nv04_instmem_dtor,
133 .init = _nouveau_instmem_init,
134 .fini = _nouveau_instmem_fini,
135 .rd32 = nv40_instmem_rd32,
136 .wr32 = nv40_instmem_wr32,
137 },
138};
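
To make the sizing above concrete: assuming a chipset 0x40 board where 0x001540 reports two enabled units (vs == 2 is an illustrative value, not a claim about any specific board), the reservation works out to about 3.2 MiB:

static u32
example_nv40_reserved(void)
{
	u32 reserved = 0x6aa0 * 2;	 /* gr context size, chipset 0x40 */
	reserved += 16 * 1024;		 /* per-channel overhead */
	reserved *= 32;			 /* per-channel -> all 32 channels */
	reserved += 512 * 1024;		 /* pci(e)gart table */
	reserved += 512 * 1024;		 /* object storage */
	return round_up(reserved, 4096); /* 0x32b000, ~3.2 MiB */
}
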
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
new file mode 100644
index 000000000000..27ef0891d10b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -0,0 +1,172 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/instmem.h>
26#include <subdev/fb.h>
27
28#include <core/mm.h>
29
30struct nv50_instmem_priv {
31 struct nouveau_instmem base;
32 spinlock_t lock;
33 u64 addr;
34};
35
36struct nv50_instobj_priv {
37 struct nouveau_instobj base;
38 struct nouveau_mem *mem;
39};
40
41static int
42nv50_instobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
43 struct nouveau_oclass *oclass, void *data, u32 size,
44 struct nouveau_object **pobject)
45{
46 struct nouveau_fb *pfb = nouveau_fb(parent);
47 struct nv50_instobj_priv *node;
48 u32 align = (unsigned long)data;
49 int ret;
50
51 size = max((size + 4095) & ~4095, (u32)4096);
52 align = max((align + 4095) & ~4095, (u32)4096);
53
54 ret = nouveau_instobj_create(parent, engine, oclass, &node);
55 *pobject = nv_object(node);
56 if (ret)
57 return ret;
58
59 ret = pfb->ram.get(pfb, size, align, 0, 0x800, &node->mem);
60 if (ret)
61 return ret;
62
63 node->base.addr = node->mem->offset;
64 node->base.size = node->mem->size << 12;
65 node->mem->page_shift = 12;
66 return 0;
67}
68
69static void
70nv50_instobj_dtor(struct nouveau_object *object)
71{
72 struct nv50_instobj_priv *node = (void *)object;
73 struct nouveau_fb *pfb = nouveau_fb(object);
74 pfb->ram.put(pfb, &node->mem);
75 nouveau_instobj_destroy(&node->base);
76}
77
78static u32
79nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
80{
81 struct nv50_instmem_priv *priv = (void *)object->engine;
82 struct nv50_instobj_priv *node = (void *)object;
83 unsigned long flags;
84 u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
85 u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
86 u32 data;
87
88 spin_lock_irqsave(&priv->lock, flags);
89 if (unlikely(priv->addr != base)) {
90 nv_wr32(priv, 0x001700, base >> 16);
91 priv->addr = base;
92 }
93 data = nv_rd32(priv, 0x700000 + addr);
94 spin_unlock_irqrestore(&priv->lock, flags);
95 return data;
96}
97
98static void
99nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data)
100{
101 struct nv50_instmem_priv *priv = (void *)object->engine;
102 struct nv50_instobj_priv *node = (void *)object;
103 unsigned long flags;
104 u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
105 u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
106
107 spin_lock_irqsave(&priv->lock, flags);
108 if (unlikely(priv->addr != base)) {
109 nv_wr32(priv, 0x001700, base >> 16);
110 priv->addr = base;
111 }
112 nv_wr32(priv, 0x700000 + addr, data);
113 spin_unlock_irqrestore(&priv->lock, flags);
114}
115
116static struct nouveau_oclass
117nv50_instobj_oclass = {
118 .ofuncs = &(struct nouveau_ofuncs) {
119 .ctor = nv50_instobj_ctor,
120 .dtor = nv50_instobj_dtor,
121 .init = _nouveau_instobj_init,
122 .fini = _nouveau_instobj_fini,
123 .rd32 = nv50_instobj_rd32,
124 .wr32 = nv50_instobj_wr32,
125 },
126};
127
128static int
129nv50_instmem_alloc(struct nouveau_instmem *imem, struct nouveau_object *parent,
130 u32 size, u32 align, struct nouveau_object **pobject)
131{
132 struct nouveau_object *engine = nv_object(imem);
133 return nouveau_object_ctor(parent, engine, &nv50_instobj_oclass,
134 (void *)(unsigned long)align, size, pobject);
135}
136
137static int
138nv50_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
139 struct nouveau_oclass *oclass, void *data, u32 size,
140 struct nouveau_object **pobject)
141{
142 struct nv50_instmem_priv *priv;
143 int ret;
144
145 ret = nouveau_instmem_create(parent, engine, oclass, &priv);
146 *pobject = nv_object(priv);
147 if (ret)
148 return ret;
149
150 spin_lock_init(&priv->lock);
151 priv->base.alloc = nv50_instmem_alloc;
152 return 0;
153}
154
155static int
156nv50_instmem_fini(struct nouveau_object *object, bool suspend)
157{
158 struct nv50_instmem_priv *priv = (void *)object;
159 priv->addr = ~0ULL;
160 return nouveau_instmem_fini(&priv->base, suspend);
161}
162
163struct nouveau_oclass
164nv50_instmem_oclass = {
165 .handle = NV_SUBDEV(INSTMEM, 0x50),
166 .ofuncs = &(struct nouveau_ofuncs) {
167 .ctor = nv50_instmem_ctor,
168 .dtor = _nouveau_instmem_dtor,
169 .init = _nouveau_instmem_init,
170 .fini = nv50_instmem_fini,
171 },
172};
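
nv50_instobj_rd32()/wr32() reach VRAM through a 1 MiB window at 0x700000; register 0x001700 selects which 1 MiB-aligned bank of VRAM the window exposes, which is why each access splits the absolute offset into 'base' and 'addr' under the spinlock. The split in isolation (an illustrative helper, not from the patch):

static void
example_window_split(u64 vram_offset, u64 *base, u32 *addr)
{
	*base = vram_offset & 0xffffff00000ULL;	/* 1 MiB-aligned bank */
	*addr = vram_offset & 0x000000fffffULL;	/* offset inside window */
	/* program the bank via nv_wr32(priv, 0x001700, *base >> 16),
	 * then access 0x700000 + *addr */
}
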
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
new file mode 100644
index 000000000000..078a2b9d6bd6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -0,0 +1,93 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/ltcg.h>
26
27struct nvc0_ltcg_priv {
28 struct nouveau_ltcg base;
29 u32 subp_nr;
30};
31
32static void
33nvc0_ltcg_subp_isr(struct nvc0_ltcg_priv *priv, int unit, int subp)
34{
35 u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400);
36 u32 stat = nv_rd32(priv, subp_base + 0x020);
37
38 if (stat) {
39 nv_info(priv, "LTC%d_LTS%d: 0x%08x\n", unit, subp, stat);
40 nv_wr32(priv, subp_base + 0x020, stat);
41 }
42}
43
44static void
45nvc0_ltcg_intr(struct nouveau_subdev *subdev)
46{
47 struct nvc0_ltcg_priv *priv = (void *)subdev;
48 u32 units;
49
50 units = nv_rd32(priv, 0x00017c);
51 while (units) {
52 u32 subp, unit = ffs(units) - 1;
53 for (subp = 0; subp < priv->subp_nr; subp++)
54 nvc0_ltcg_subp_isr(priv, unit, subp);
55 units &= ~(1 << unit);
56 }
57
58 /* we do something horribly wrong and upset PMFB a lot, so mask off
59 * interrupts from it after the first one until it's fixed
60 */
61 nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
62}
63
64static int
65nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
66 struct nouveau_oclass *oclass, void *data, u32 size,
67 struct nouveau_object **pobject)
68{
69 struct nvc0_ltcg_priv *priv;
70 int ret;
71
72 ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
73 *pobject = nv_object(priv);
74 if (ret)
75 return ret;
76
77 priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 24;
78 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
79
80 nv_subdev(priv)->intr = nvc0_ltcg_intr;
81 return 0;
82}
83
84struct nouveau_oclass
85nvc0_ltcg_oclass = {
86 .handle = NV_SUBDEV(LTCG, 0xc0),
87 .ofuncs = &(struct nouveau_ofuncs) {
88 .ctor = nvc0_ltcg_ctor,
89 .dtor = _nouveau_ltcg_dtor,
90 .init = _nouveau_ltcg_init,
91 .fini = _nouveau_ltcg_fini,
92 },
93};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
new file mode 100644
index 000000000000..de5721cfc4c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -0,0 +1,49 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mc.h>
26
27void
28nouveau_mc_intr(struct nouveau_subdev *subdev)
29{
30 struct nouveau_mc *pmc = nouveau_mc(subdev);
31 const struct nouveau_mc_intr *map = pmc->intr_map;
32 struct nouveau_subdev *unit;
33 u32 stat;
34
35 stat = nv_rd32(pmc, 0x000100);
36 while (stat && map->stat) {
37 if (stat & map->stat) {
38 unit = nouveau_subdev(subdev, map->unit);
39 if (unit && unit->intr)
40 unit->intr(unit);
41 stat &= ~map->stat;
42 }
43 map++;
44 }
45
46 if (stat) {
47 nv_error(pmc, "unknown intr 0x%08x\n", stat);
48 }
49}
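
nouveau_mc_intr() walks the chipset's intr_map until the status word is fully claimed, forwarding each set bit to the owning subdev's intr() hook; the per-chipset tables that follow (nv04, nv50, nv98, nvc0) differ only in which bits exist. The same table-driven dispatch in miniature (standalone sketch, illustrative names):

struct example_intr {
	u32 stat;
	void (*handler)(void);
};

static void
example_dispatch(u32 stat, const struct example_intr *map)
{
	while (stat && map->stat) {
		if (stat & map->stat) {
			map->handler();		/* unit->intr(unit) upstream */
			stat &= ~map->stat;
		}
		map++;
	}
	/* any bits left in 'stat' are an unknown interrupt source */
}
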
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
new file mode 100644
index 000000000000..23ebe477a6f0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -0,0 +1,83 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mc.h>
26
27struct nv04_mc_priv {
28 struct nouveau_mc base;
29};
30
31const struct nouveau_mc_intr
32nv04_mc_intr[] = {
33 { 0x00000001, NVDEV_ENGINE_MPEG }, /* NV17- MPEG/ME */
34 { 0x00000100, NVDEV_ENGINE_FIFO },
35 { 0x00001000, NVDEV_ENGINE_GR },
36 { 0x00020000, NVDEV_ENGINE_VP }, /* NV40- */
37 { 0x00100000, NVDEV_SUBDEV_TIMER },
38 { 0x01000000, NVDEV_ENGINE_DISP }, /* NV04- PCRTC0 */
39 { 0x02000000, NVDEV_ENGINE_DISP }, /* NV11- PCRTC1 */
40 { 0x10000000, NVDEV_SUBDEV_GPIO }, /* PBUS */
41 { 0x80000000, NVDEV_ENGINE_SW },
42 {}
43};
44
45static int
46nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
47 struct nouveau_oclass *oclass, void *data, u32 size,
48 struct nouveau_object **pobject)
49{
50 struct nv04_mc_priv *priv;
51 int ret;
52
53 ret = nouveau_mc_create(parent, engine, oclass, &priv);
54 *pobject = nv_object(priv);
55 if (ret)
56 return ret;
57
58 nv_subdev(priv)->intr = nouveau_mc_intr;
59 priv->base.intr_map = nv04_mc_intr;
60 return 0;
61}
62
63int
64nv04_mc_init(struct nouveau_object *object)
65{
66 struct nv04_mc_priv *priv = (void *)object;
67
68 nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
69 nv_wr32(priv, 0x001850, 0x00000001); /* disable rom access */
70
71 return nouveau_mc_init(&priv->base);
72}
73
74struct nouveau_oclass
75nv04_mc_oclass = {
76 .handle = NV_SUBDEV(MC, 0x04),
77 .ofuncs = &(struct nouveau_ofuncs) {
78 .ctor = nv04_mc_ctor,
79 .dtor = _nouveau_mc_dtor,
80 .init = nv04_mc_init,
81 .fini = _nouveau_mc_fini,
82 },
83};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
new file mode 100644
index 000000000000..397d868359ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -0,0 +1,74 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mc.h>
26
27struct nv44_mc_priv {
28 struct nouveau_mc base;
29};
30
31static int
32nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
33 struct nouveau_oclass *oclass, void *data, u32 size,
34 struct nouveau_object **pobject)
35{
36 struct nv44_mc_priv *priv;
37 int ret;
38
39 ret = nouveau_mc_create(parent, engine, oclass, &priv);
40 *pobject = nv_object(priv);
41 if (ret)
42 return ret;
43
44 nv_subdev(priv)->intr = nouveau_mc_intr;
45 priv->base.intr_map = nv04_mc_intr;
46 return 0;
47}
48
49static int
50nv44_mc_init(struct nouveau_object *object)
51{
52 struct nv44_mc_priv *priv = (void *)object;
53 u32 tmp = nv_rd32(priv, 0x10020c);
54
55 nv_wr32(priv, 0x000200, 0xffffffff); /* everything enabled */
56
57 nv_wr32(priv, 0x001700, tmp);
58 nv_wr32(priv, 0x001704, 0);
59 nv_wr32(priv, 0x001708, 0);
60 nv_wr32(priv, 0x00170c, tmp);
61
62 return nouveau_mc_init(&priv->base);
63}
64
65struct nouveau_oclass
66nv44_mc_oclass = {
67 .handle = NV_SUBDEV(MC, 0x44),
68 .ofuncs = &(struct nouveau_ofuncs) {
69 .ctor = nv44_mc_ctor,
70 .dtor = _nouveau_mc_dtor,
71 .init = nv44_mc_init,
72 .fini = _nouveau_mc_fini,
73 },
74};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
new file mode 100644
index 000000000000..cedf33b02977
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -0,0 +1,80 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mc.h>
26
27struct nv50_mc_priv {
28 struct nouveau_mc base;
29};
30
31static const struct nouveau_mc_intr
32nv50_mc_intr[] = {
33 { 0x00000001, NVDEV_ENGINE_MPEG },
34 { 0x00000100, NVDEV_ENGINE_FIFO },
35 { 0x00001000, NVDEV_ENGINE_GR },
36 { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84- */
37 { 0x00008000, NVDEV_ENGINE_BSP }, /* NV84- */
38 { 0x00100000, NVDEV_SUBDEV_TIMER },
39 { 0x00200000, NVDEV_SUBDEV_GPIO },
40 { 0x04000000, NVDEV_ENGINE_DISP },
41 { 0x80000000, NVDEV_ENGINE_SW },
42 {},
43};
44
45static int
46nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
47 struct nouveau_oclass *oclass, void *data, u32 size,
48 struct nouveau_object **pobject)
49{
50 struct nv50_mc_priv *priv;
51 int ret;
52
53 ret = nouveau_mc_create(parent, engine, oclass, &priv);
54 *pobject = nv_object(priv);
55 if (ret)
56 return ret;
57
58 nv_subdev(priv)->intr = nouveau_mc_intr;
59 priv->base.intr_map = nv50_mc_intr;
60 return 0;
61}
62
63int
64nv50_mc_init(struct nouveau_object *object)
65{
66 struct nv50_mc_priv *priv = (void *)object;
67 nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
68 return nouveau_mc_init(&priv->base);
69}
70
71struct nouveau_oclass
72nv50_mc_oclass = {
73 .handle = NV_SUBDEV(MC, 0x50),
74 .ofuncs = &(struct nouveau_ofuncs) {
75 .ctor = nv50_mc_ctor,
76 .dtor = _nouveau_mc_dtor,
77 .init = nv50_mc_init,
78 .fini = _nouveau_mc_fini,
79 },
80};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
new file mode 100644
index 000000000000..a001e4c4d38d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -0,0 +1,73 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mc.h>
26
27struct nv98_mc_priv {
28 struct nouveau_mc base;
29};
30
31static const struct nouveau_mc_intr
32nv98_mc_intr[] = {
33 { 0x00000001, NVDEV_ENGINE_PPP },
34 { 0x00000100, NVDEV_ENGINE_FIFO },
35 { 0x00001000, NVDEV_ENGINE_GR },
36 { 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */
37 { 0x00008000, NVDEV_ENGINE_BSP },
38 { 0x00100000, NVDEV_SUBDEV_TIMER },
39 { 0x00200000, NVDEV_SUBDEV_GPIO },
40 { 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */
41 { 0x04000000, NVDEV_ENGINE_DISP },
42 { 0x80000000, NVDEV_ENGINE_SW },
43 {},
44};
45
46static int
47nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
48 struct nouveau_oclass *oclass, void *data, u32 size,
49 struct nouveau_object **pobject)
50{
51 struct nv98_mc_priv *priv;
52 int ret;
53
54 ret = nouveau_mc_create(parent, engine, oclass, &priv);
55 *pobject = nv_object(priv);
56 if (ret)
57 return ret;
58
59 nv_subdev(priv)->intr = nouveau_mc_intr;
60 priv->base.intr_map = nv98_mc_intr;
61 return 0;
62}
63
64struct nouveau_oclass
65nv98_mc_oclass = {
66 .handle = NV_SUBDEV(MC, 0x98),
67 .ofuncs = &(struct nouveau_ofuncs) {
68 .ctor = nv98_mc_ctor,
69 .dtor = _nouveau_mc_dtor,
70 .init = nv50_mc_init,
71 .fini = _nouveau_mc_fini,
72 },
73};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
new file mode 100644
index 000000000000..c2b81e30a17d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -0,0 +1,75 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mc.h>
26
27struct nvc0_mc_priv {
28 struct nouveau_mc base;
29};
30
31static const struct nouveau_mc_intr
32nvc0_mc_intr[] = {
33 { 0x00000001, NVDEV_ENGINE_PPP },
34 { 0x00000020, NVDEV_ENGINE_COPY0 },
35 { 0x00000040, NVDEV_ENGINE_COPY1 },
36 { 0x00000100, NVDEV_ENGINE_FIFO },
37 { 0x00001000, NVDEV_ENGINE_GR },
38 { 0x00008000, NVDEV_ENGINE_BSP },
39 { 0x00100000, NVDEV_SUBDEV_TIMER },
40 { 0x00200000, NVDEV_SUBDEV_GPIO },
41 { 0x02000000, NVDEV_SUBDEV_LTCG },
42 { 0x04000000, NVDEV_ENGINE_DISP },
43 { 0x40000000, NVDEV_SUBDEV_IBUS },
44 { 0x80000000, NVDEV_ENGINE_SW },
45 {},
46};
47
48static int
49nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nouveau_oclass *oclass, void *data, u32 size,
51 struct nouveau_object **pobject)
52{
53 struct nvc0_mc_priv *priv;
54 int ret;
55
56 ret = nouveau_mc_create(parent, engine, oclass, &priv);
57 *pobject = nv_object(priv);
58 if (ret)
59 return ret;
60
61 nv_subdev(priv)->intr = nouveau_mc_intr;
62 priv->base.intr_map = nvc0_mc_intr;
63 return 0;
64}
65
66struct nouveau_oclass
67nvc0_mc_oclass = {
68 .handle = NV_SUBDEV(MC, 0xc0),
69 .ofuncs = &(struct nouveau_ofuncs) {
70 .ctor = nvc0_mc_ctor,
71 .dtor = _nouveau_mc_dtor,
72 .init = nv50_mc_init,
73 .fini = _nouveau_mc_fini,
74 },
75};
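Editor's note: the nv98/nvc0 intr_map tables above pair a PMC interrupt-status bit with the unit whose handler should service it; the shared nouveau_mc_intr handler installed in each ctor walks the table against the pending-status word. A minimal standalone sketch of that table-driven dispatch, with hypothetical names and a made-up status value:

#include <stdint.h>
#include <stdio.h>

struct intr_map {
	uint32_t stat;
	const char *unit;
};

/* trimmed stand-in for the nvc0_mc_intr table above */
static const struct intr_map map[] = {
	{ 0x00000100, "FIFO" },
	{ 0x00001000, "GR" },
	{ 0x04000000, "DISP" },
	{}                          /* zero stat terminates the table */
};

int main(void)
{
	uint32_t stat = 0x04001000; /* pretend GR and DISP both fired */
	const struct intr_map *m;

	for (m = map; m->stat; m++) {
		if (stat & m->stat)
			printf("dispatching to %s\n", m->unit);
	}
	return 0;
}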
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
new file mode 100644
index 000000000000..93e3ddf7303a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -0,0 +1,290 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/option.h>
26
27#include <subdev/i2c.h>
28#include <subdev/mxm.h>
29#include <subdev/bios.h>
30#include <subdev/bios/mxm.h>
31
32#include "mxms.h"
33
34static bool
35mxm_shadow_rom_fetch(struct nouveau_i2c_port *i2c, u8 addr,
36 u8 offset, u8 size, u8 *data)
37{
38 struct i2c_msg msgs[] = {
39 { .addr = addr, .flags = 0, .len = 1, .buf = &offset },
40 { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
41 };
42
43 return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
44}
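Editor's note: mxm_shadow_rom_fetch() is the standard two-message I2C read: the first message writes the register offset to set the device's internal pointer, the second (I2C_M_RD) reads size bytes from there, and i2c_transfer() returning 2 means both messages completed. A self-contained sketch of the same addressing scheme, with a hypothetical in-memory EEPROM standing in for the real bus:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* hypothetical 256-byte EEPROM image standing in for the i2c device */
static uint8_t eeprom[256];

/* mimics the write-offset-then-read shape of mxm_shadow_rom_fetch() */
static int fake_rom_fetch(uint8_t offset, uint8_t size, uint8_t *data)
{
	if ((int)offset + size > (int)sizeof(eeprom))
		return 0;                        /* NAK: ran off the device */
	memcpy(data, &eeprom[offset], size);     /* the I2C_M_RD message */
	return 1;                                /* both messages completed */
}

int main(void)
{
	uint8_t header[8];

	memcpy(eeprom, "MXM_\x03\x00\x10\x00", 8); /* fake MXMS header */
	if (fake_rom_fetch(0, sizeof(header), header))
		printf("sig %.4s, version %d.%d\n",
		       header, header[4], header[5]);
	return 0;
}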
45
46static bool
47mxm_shadow_rom(struct nouveau_mxm *mxm, u8 version)
48{
49 struct nouveau_bios *bios = nouveau_bios(mxm);
50 struct nouveau_i2c *i2c = nouveau_i2c(mxm);
51 struct nouveau_i2c_port *port = NULL;
52	u8 i2cidx, mxms[8], addr, size;
53
54 i2cidx = mxm_ddc_map(bios, 1 /* LVDS_DDC */) & 0x0f;
55 if (i2cidx < 0x0f)
56 port = i2c->find(i2c, i2cidx);
57 if (!port)
58 return false;
59
60 addr = 0x54;
61	if (!mxm_shadow_rom_fetch(port, addr, 0, 8, mxms)) {
62		addr = 0x56;
63		if (!mxm_shadow_rom_fetch(port, addr, 0, 8, mxms))
64 return false;
65 }
66
67	mxm->mxms = mxms;	/* point at the on-stack header so the size helpers can parse it */
68 size = mxms_headerlen(mxm) + mxms_structlen(mxm);
69 mxm->mxms = kmalloc(size, GFP_KERNEL);
70
71 if (mxm->mxms &&
72 mxm_shadow_rom_fetch(port, addr, 0, size, mxm->mxms))
73 return true;
74
75 kfree(mxm->mxms);
76 mxm->mxms = NULL;
77 return false;
78}
79
80#if defined(CONFIG_ACPI)
81static bool
82mxm_shadow_dsm(struct nouveau_mxm *mxm, u8 version)
83{
84 struct nouveau_device *device = nv_device(mxm);
85 static char muid[] = {
86 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
87 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
88 };
89 u32 mxms_args[] = { 0x00000000 };
90 union acpi_object args[4] = {
91 /* _DSM MUID */
92	{ .buffer.type = ACPI_TYPE_BUFFER,
93 .buffer.length = sizeof(muid),
94 .buffer.pointer = muid,
95 },
96 /* spec says this can be zero to mean "highest revision", but
97 * of course there's at least one bios out there which fails
98	 * unless you pass in exactly the version it supports.
99 */
100 { .integer.type = ACPI_TYPE_INTEGER,
101 .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
102 },
103 /* MXMS function */
104 { .integer.type = ACPI_TYPE_INTEGER,
105 .integer.value = 0x00000010,
106 },
107 /* Pointer to MXMS arguments */
108 { .buffer.type = ACPI_TYPE_BUFFER,
109 .buffer.length = sizeof(mxms_args),
110 .buffer.pointer = (char *)mxms_args,
111 },
112 };
113 struct acpi_object_list list = { ARRAY_SIZE(args), args };
114 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
115 union acpi_object *obj;
116 acpi_handle handle;
117 int ret;
118
119 handle = DEVICE_ACPI_HANDLE(&device->pdev->dev);
120 if (!handle)
121 return false;
122
123 ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
124 if (ret) {
125 nv_debug(mxm, "DSM MXMS failed: %d\n", ret);
126 return false;
127 }
128
129 obj = retn.pointer;
130 if (obj->type == ACPI_TYPE_BUFFER) {
131 mxm->mxms = kmemdup(obj->buffer.pointer,
132 obj->buffer.length, GFP_KERNEL);
133 } else
134 if (obj->type == ACPI_TYPE_INTEGER) {
135 nv_debug(mxm, "DSM MXMS returned 0x%llx\n", obj->integer.value);
136 }
137
138 kfree(obj);
139 return mxm->mxms != NULL;
140}
141#endif
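Editor's note: the second _DSM argument spreads the packed MXM version byte (major nibble, minor nibble) into the revision-ID integer the firmware expects: (version & 0xf0) << 4 | (version & 0x0f) turns 0x30 (MXM 3.0) into 0x300, which matters because, as the comment above notes, some BIOSes reject anything but the exact version they support. A quick check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t version = 0x30; /* MXM 3.0: major in the high nibble */
	uint32_t rev = (version & 0xf0) << 4 | (version & 0x0f);

	printf("_DSM revision argument: 0x%03x\n", rev); /* prints 0x300 */
	return 0;
}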
142
143#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
144
145#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
146
147static u8
148wmi_wmmx_mxmi(struct nouveau_mxm *mxm, u8 version)
149{
150 u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
151 struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
152 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
153 union acpi_object *obj;
154 acpi_status status;
155
156 status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
157 if (ACPI_FAILURE(status)) {
158 nv_debug(mxm, "WMMX MXMI returned %d\n", status);
159 return 0x00;
160 }
161
162 obj = retn.pointer;
163 if (obj->type == ACPI_TYPE_INTEGER) {
164 version = obj->integer.value;
165 nv_debug(mxm, "WMMX MXMI version %d.%d\n",
166 (version >> 4), version & 0x0f);
167 } else {
168 version = 0;
169 nv_debug(mxm, "WMMX MXMI returned non-integer\n");
170 }
171
172 kfree(obj);
173 return version;
174}
175
176static bool
177mxm_shadow_wmi(struct nouveau_mxm *mxm, u8 version)
178{
179 u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
180 struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
181 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
182 union acpi_object *obj;
183 acpi_status status;
184
185 if (!wmi_has_guid(WMI_WMMX_GUID)) {
186 nv_debug(mxm, "WMMX GUID not found\n");
187 return false;
188 }
189
190 mxms_args[1] = wmi_wmmx_mxmi(mxm, 0x00);
191 if (!mxms_args[1])
192 mxms_args[1] = wmi_wmmx_mxmi(mxm, version);
193 if (!mxms_args[1])
194 return false;
195
196 status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
197 if (ACPI_FAILURE(status)) {
198 nv_debug(mxm, "WMMX MXMS returned %d\n", status);
199 return false;
200 }
201
202 obj = retn.pointer;
203 if (obj->type == ACPI_TYPE_BUFFER) {
204 mxm->mxms = kmemdup(obj->buffer.pointer,
205 obj->buffer.length, GFP_KERNEL);
206 }
207
208 kfree(obj);
209 return mxm->mxms != NULL;
210}
211#endif
212
213static struct mxm_shadow_h {
214 const char *name;
215 bool (*exec)(struct nouveau_mxm *, u8 version);
216} _mxm_shadow[] = {
217 { "ROM", mxm_shadow_rom },
218#if defined(CONFIG_ACPI)
219 { "DSM", mxm_shadow_dsm },
220#endif
221#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
222 { "WMI", mxm_shadow_wmi },
223#endif
224 {}
225};
226
227static int
228mxm_shadow(struct nouveau_mxm *mxm, u8 version)
229{
230 struct mxm_shadow_h *shadow = _mxm_shadow;
231 do {
232 nv_debug(mxm, "checking %s\n", shadow->name);
233 if (shadow->exec(mxm, version)) {
234 if (mxms_valid(mxm))
235 return 0;
236 kfree(mxm->mxms);
237 mxm->mxms = NULL;
238 }
239 } while ((++shadow)->name);
240 return -ENOENT;
241}
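Editor's note: mxm_shadow() walks a NULL-terminated table of shadowing methods (ROM, then DSM, then WMI, depending on kernel config) and keeps the first result that also passes mxms_valid(). A standalone sketch of that fallback pattern, with stand-in methods:

#include <stdbool.h>
#include <stdio.h>

struct shadow_method {
	const char *name;
	bool (*exec)(void);
};

static bool try_rom(void) { return false; } /* stand-ins for ROM/DSM/WMI */
static bool try_dsm(void) { return true; }

int main(void)
{
	static const struct shadow_method methods[] = {
		{ "ROM", try_rom },
		{ "DSM", try_dsm },
		{}              /* NULL name terminates, as in _mxm_shadow[] */
	};
	const struct shadow_method *m = methods;

	do {
		printf("checking %s\n", m->name);
		if (m->exec())
			return 0;       /* first method that works wins */
	} while ((++m)->name);
	return 1;
}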
242
243int
244nouveau_mxm_create_(struct nouveau_object *parent,
245 struct nouveau_object *engine,
246 struct nouveau_oclass *oclass, int length, void **pobject)
247{
248 struct nouveau_device *device = nv_device(parent);
249 struct nouveau_bios *bios = nouveau_bios(device);
250 struct nouveau_mxm *mxm;
251 u8 ver, len;
252 u16 data;
253 int ret;
254
255 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "MXM", "mxm",
256 length, pobject);
257 mxm = *pobject;
258 if (ret)
259 return ret;
260
261 data = mxm_table(bios, &ver, &len);
262 if (!data || !(ver = nv_ro08(bios, data))) {
263 nv_info(mxm, "no VBIOS data, nothing to do\n");
264 return 0;
265 }
266
267 nv_info(mxm, "BIOS version %d.%d\n", ver >> 4, ver & 0x0f);
268
269 if (mxm_shadow(mxm, ver)) {
270 nv_info(mxm, "failed to locate valid SIS\n");
271#if 0
272 /* we should, perhaps, fall back to some kind of limited
273 * mode here if the x86 vbios hasn't already done the
274 * work for us (so we prevent loading with completely
275 * whacked vbios tables).
276 */
277 return -EINVAL;
278#else
279 return 0;
280#endif
281 }
282
283 nv_info(mxm, "MXMS Version %d.%d\n",
284 mxms_version(mxm) >> 8, mxms_version(mxm) & 0xff);
285 mxms_foreach(mxm, 0, NULL, NULL);
286
287 if (nouveau_boolopt(device->cfgopt, "NvMXMDCB", true))
288 mxm->action |= MXM_SANITISE_DCB;
289 return 0;
290}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
new file mode 100644
index 000000000000..839ca1edc132
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
@@ -0,0 +1,193 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mxm.h>
26#include "mxms.h"
27
28#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
29#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
30
31static u8 *
32mxms_data(struct nouveau_mxm *mxm)
33{
34 return mxm->mxms;
35
36}
37
38u16
39mxms_version(struct nouveau_mxm *mxm)
40{
41 u8 *mxms = mxms_data(mxm);
42 u16 version = (mxms[4] << 8) | mxms[5];
43	switch (version) {
44 case 0x0200:
45 case 0x0201:
46 case 0x0300:
47 return version;
48 default:
49 break;
50 }
51
52 nv_debug(mxm, "unknown version %d.%d\n", mxms[4], mxms[5]);
53 return 0x0000;
54}
55
56u16
57mxms_headerlen(struct nouveau_mxm *mxm)
58{
59 return 8;
60}
61
62u16
63mxms_structlen(struct nouveau_mxm *mxm)
64{
65 return *(u16 *)&mxms_data(mxm)[6];
66}
67
68bool
69mxms_checksum(struct nouveau_mxm *mxm)
70{
71 u16 size = mxms_headerlen(mxm) + mxms_structlen(mxm);
72 u8 *mxms = mxms_data(mxm), sum = 0;
73 while (size--)
74 sum += *mxms++;
75 if (sum) {
76 nv_debug(mxm, "checksum invalid\n");
77 return false;
78 }
79 return true;
80}
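Editor's note: mxms_checksum() applies the usual firmware-table rule: every byte of the structure, including the stored checksum byte, must sum to zero modulo 256. A standalone illustration that builds a tiny blob, fixes up its checksum byte, and verifies it the same way:

#include <stdint.h>
#include <stdio.h>

/* same rule as mxms_checksum(): all bytes, including the stored
 * checksum, must sum to zero modulo 256 */
static int checksum_ok(const uint8_t *buf, uint16_t size)
{
	uint8_t sum = 0;
	while (size--)
		sum += *buf++;
	return sum == 0;
}

int main(void)
{
	uint8_t blob[8] = { 'M', 'X', 'M', '_', 0x03, 0x00, 0x00, 0x00 };
	uint8_t sum = 0;
	unsigned i;

	for (i = 0; i < sizeof(blob) - 1; i++)
		sum += blob[i];
	blob[7] = (uint8_t)(0x100 - sum);   /* fix up the checksum byte */

	printf("valid: %d\n", checksum_ok(blob, sizeof(blob))); /* 1 */
	return 0;
}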
81
82bool
83mxms_valid(struct nouveau_mxm *mxm)
84{
85 u8 *mxms = mxms_data(mxm);
86	if (*(u32 *)mxms != 0x5f4d584d) {	/* 'MXM_' */
87 nv_debug(mxm, "signature invalid\n");
88 return false;
89 }
90
91 if (!mxms_version(mxm) || !mxms_checksum(mxm))
92 return false;
93
94 return true;
95}
96
97bool
98mxms_foreach(struct nouveau_mxm *mxm, u8 types,
99 bool (*exec)(struct nouveau_mxm *, u8 *, void *), void *info)
100{
101 u8 *mxms = mxms_data(mxm);
102 u8 *desc = mxms + mxms_headerlen(mxm);
103 u8 *fini = desc + mxms_structlen(mxm) - 1;
104 while (desc < fini) {
105 u8 type = desc[0] & 0x0f;
106 u8 headerlen = 0;
107 u8 recordlen = 0;
108 u8 entries = 0;
109
110 switch (type) {
111 case 0: /* Output Device Structure */
112 if (mxms_version(mxm) >= 0x0300)
113 headerlen = 8;
114 else
115 headerlen = 6;
116 break;
117 case 1: /* System Cooling Capability Structure */
118 case 2: /* Thermal Structure */
119 case 3: /* Input Power Structure */
120 headerlen = 4;
121 break;
122 case 4: /* GPIO Device Structure */
123 headerlen = 4;
124 recordlen = 2;
125 entries = (ROM32(desc[0]) & 0x01f00000) >> 20;
126 break;
127 case 5: /* Vendor Specific Structure */
128 headerlen = 8;
129 break;
130 case 6: /* Backlight Control Structure */
131 if (mxms_version(mxm) >= 0x0300) {
132 headerlen = 4;
133 recordlen = 8;
134 entries = (desc[1] & 0xf0) >> 4;
135 } else {
136 headerlen = 8;
137 }
138 break;
139 case 7: /* Fan Control Structure */
140 headerlen = 8;
141 recordlen = 4;
142 entries = desc[1] & 0x07;
143 break;
144 default:
145 nv_debug(mxm, "unknown descriptor type %d\n", type);
146 return false;
147 }
148
149 if (nv_subdev(mxm)->debug >= NV_DBG_DEBUG && (exec == NULL)) {
150		static const char *mxms_desc_name[] = {
151 "ODS", "SCCS", "TS", "IPS",
152 "GSD", "VSS", "BCS", "FCS",
153 };
154 u8 *dump = desc;
155 int i, j;
156
157 nv_debug(mxm, "%4s: ", mxms_desc_name[type]);
158 for (j = headerlen - 1; j >= 0; j--)
159 printk("%02x", dump[j]);
160 printk("\n");
161 dump += headerlen;
162
163 for (i = 0; i < entries; i++, dump += recordlen) {
164 nv_debug(mxm, " ");
165 for (j = recordlen - 1; j >= 0; j--)
166 printk("%02x", dump[j]);
167 printk("\n");
168 }
169 }
170
171 if (types & (1 << type)) {
172 if (!exec(mxm, desc, info))
173 return false;
174 }
175
176 desc += headerlen + (entries * recordlen);
177 }
178
179 return true;
180}
181
182void
183mxms_output_device(struct nouveau_mxm *mxm, u8 *pdata, struct mxms_odev *desc)
184{
185 u64 data = ROM32(pdata[0]);
186 if (mxms_version(mxm) >= 0x0300)
187 data |= (u64)ROM16(pdata[4]) << 32;
188
189 desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
190 desc->ddc_port = (data & 0x0000000000000f00ULL) >> 8;
191 desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
192 desc->dig_conn = (data & 0x0000000000780000ULL) >> 19;
193}
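Editor's note: mxms_output_device() above unpacks one Output Device Structure: output type in bits 4-7, DDC port in bits 8-11, connector type in bits 12-16 and digital connection in bits 19-22 (with version 3.0 adding a further 16 bits on top). A small sketch that packs a hypothetical descriptor and pulls it apart with the same masks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical descriptor: TMDS output (2) on ddc port 1,
	 * connector type 3, digital connection 4 */
	uint64_t data = (2ULL << 4) | (1ULL << 8) | (3ULL << 12) | (4ULL << 19);

	printf("outp_type %u\n", (unsigned)((data & 0x00000f0) >> 4));
	printf("ddc_port  %u\n", (unsigned)((data & 0x0000f00) >> 8));
	printf("conn_type %u\n", (unsigned)((data & 0x001f000) >> 12));
	printf("dig_conn  %u\n", (unsigned)((data & 0x0780000) >> 19));
	return 0;
}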
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h
new file mode 100644
index 000000000000..5e0be0c591ca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.h
@@ -0,0 +1,22 @@
1#ifndef __NVMXM_MXMS_H__
2#define __NVMXM_MXMS_H__
3
4struct mxms_odev {
5 u8 outp_type;
6 u8 conn_type;
7 u8 ddc_port;
8 u8 dig_conn;
9};
10
11void mxms_output_device(struct nouveau_mxm *, u8 *, struct mxms_odev *);
12
13u16 mxms_version(struct nouveau_mxm *);
14u16 mxms_headerlen(struct nouveau_mxm *);
15u16 mxms_structlen(struct nouveau_mxm *);
16bool mxms_checksum(struct nouveau_mxm *);
17bool mxms_valid(struct nouveau_mxm *);
18
19bool mxms_foreach(struct nouveau_mxm *, u8,
20 bool (*)(struct nouveau_mxm *, u8 *, void *), void *);
21
22#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
new file mode 100644
index 000000000000..af129c2e8113
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/nv50.c
@@ -0,0 +1,233 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/mxm.h>
26#include <subdev/bios.h>
27#include <subdev/bios/conn.h>
28#include <subdev/bios/dcb.h>
29#include <subdev/bios/mxm.h>
30
31#include "mxms.h"
32
33struct nv50_mxm_priv {
34 struct nouveau_mxm base;
35};
36
37struct context {
38 u32 *outp;
39 struct mxms_odev desc;
40};
41
42static bool
43mxm_match_tmds_partner(struct nouveau_mxm *mxm, u8 *data, void *info)
44{
45 struct context *ctx = info;
46 struct mxms_odev desc;
47
48 mxms_output_device(mxm, data, &desc);
49 if (desc.outp_type == 2 &&
50 desc.dig_conn == ctx->desc.dig_conn)
51 return false;
52 return true;
53}
54
55static bool
56mxm_match_dcb(struct nouveau_mxm *mxm, u8 *data, void *info)
57{
58 struct nouveau_bios *bios = nouveau_bios(mxm);
59 struct context *ctx = info;
60 u64 desc = *(u64 *)data;
61
62 mxms_output_device(mxm, data, &ctx->desc);
63
64 /* match dcb encoder type to mxm-ods device type */
65 if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
66 return true;
67
68 /* digital output, have some extra stuff to match here, there's a
69 * table in the vbios that provides a mapping from the mxm digital
70 * connection enum values to SOR/link
71 */
72 if ((desc & 0x00000000000000f0) >= 0x20) {
73 /* check against sor index */
74 u8 link = mxm_sor_map(bios, ctx->desc.dig_conn);
75 if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
76 return true;
77
78 /* check dcb entry has a compatible link field */
79 link = (link & 0x30) >> 4;
80 if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
81 return true;
82 }
83
84 /* mark this descriptor accounted for by setting invalid device type,
85	 * except of course some manufacturers don't follow specs properly and
86 * we need to avoid killing off the TMDS function on DP connectors
87 * if MXM-SIS is missing an entry for it.
88 */
89 data[0] &= ~0xf0;
90 if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
91 mxms_foreach(mxm, 0x01, mxm_match_tmds_partner, ctx)) {
92 data[0] |= 0x20; /* modify descriptor to match TMDS now */
93 } else {
94 data[0] |= 0xf0;
95 }
96
97 return false;
98}
99
100static int
101mxm_dcb_sanitise_entry(struct nouveau_bios *bios, void *data, int idx, u16 pdcb)
102{
103 struct nouveau_mxm *mxm = nouveau_mxm(bios);
104 struct context ctx = { .outp = (u32 *)(bios->data + pdcb) };
105 u8 type, i2cidx, link, ver, len;
106 u8 *conn;
107
108 /* look for an output device structure that matches this dcb entry.
109 * if one isn't found, disable it.
110 */
111 if (mxms_foreach(mxm, 0x01, mxm_match_dcb, &ctx)) {
112 nv_debug(mxm, "disable %d: 0x%08x 0x%08x\n",
113 idx, ctx.outp[0], ctx.outp[1]);
114 ctx.outp[0] |= 0x0000000f;
115 return 0;
116 }
117
118 /* modify the output's ddc/aux port, there's a pointer to a table
119 * with the mapping from mxm ddc/aux port to dcb i2c_index in the
120 * vbios mxm table
121 */
122 i2cidx = mxm_ddc_map(bios, ctx.desc.ddc_port);
123 if ((ctx.outp[0] & 0x0000000f) != DCB_OUTPUT_DP)
124 i2cidx = (i2cidx & 0x0f) << 4;
125 else
126 i2cidx = (i2cidx & 0xf0);
127
128 if (i2cidx != 0xf0) {
129 ctx.outp[0] &= ~0x000000f0;
130 ctx.outp[0] |= i2cidx;
131 }
132
133 /* override dcb sorconf.link, based on what mxm data says */
134 switch (ctx.desc.outp_type) {
135 case 0x00: /* Analog CRT */
136 case 0x01: /* Analog TV/HDTV */
137 break;
138 default:
139 link = mxm_sor_map(bios, ctx.desc.dig_conn) & 0x30;
140 ctx.outp[1] &= ~0x00000030;
141 ctx.outp[1] |= link;
142 break;
143 }
144
145 /* we may need to fixup various other vbios tables based on what
146 * the descriptor says the connector type should be.
147 *
148 * in a lot of cases, the vbios tables will claim DVI-I is possible,
149 * and the mxm data says the connector is really HDMI. another
150 * common example is DP->eDP.
151 */
152 conn = bios->data;
153 conn += dcb_conn(bios, (ctx.outp[0] & 0x0000f000) >> 12, &ver, &len);
154 type = conn[0];
155 switch (ctx.desc.conn_type) {
156 case 0x01: /* LVDS */
157 ctx.outp[1] |= 0x00000004; /* use_power_scripts */
158 /* XXX: modify default link width in LVDS table */
159 break;
160 case 0x02: /* HDMI */
161 type = DCB_CONNECTOR_HDMI_1;
162 break;
163 case 0x03: /* DVI-D */
164 type = DCB_CONNECTOR_DVI_D;
165 break;
166 case 0x0e: /* eDP, falls through to DPint */
167 ctx.outp[1] |= 0x00010000;
168 case 0x07: /* DP internal, wtf is this?? HP8670w */
169 ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
170 type = DCB_CONNECTOR_eDP;
171 break;
172 default:
173 break;
174 }
175
176 if (mxms_version(mxm) >= 0x0300)
177 conn[0] = type;
178
179 return 0;
180}
181
182static bool
183mxm_show_unmatched(struct nouveau_mxm *mxm, u8 *data, void *info)
184{
185 u64 desc = *(u64 *)data;
186 if ((desc & 0xf0) != 0xf0)
187 nv_info(mxm, "unmatched output device 0x%016llx\n", desc);
188 return true;
189}
190
191static void
192mxm_dcb_sanitise(struct nouveau_mxm *mxm)
193{
194 struct nouveau_bios *bios = nouveau_bios(mxm);
195 u8 ver, hdr, cnt, len;
196 u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len);
197 if (dcb == 0x0000 || ver != 0x40) {
198 nv_debug(mxm, "unsupported DCB version\n");
199 return;
200 }
201
202 dcb_outp_foreach(bios, NULL, mxm_dcb_sanitise_entry);
203 mxms_foreach(mxm, 0x01, mxm_show_unmatched, NULL);
204}
205
206static int
207nv50_mxm_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
208 struct nouveau_oclass *oclass, void *data, u32 size,
209 struct nouveau_object **pobject)
210{
211 struct nv50_mxm_priv *priv;
212 int ret;
213
214 ret = nouveau_mxm_create(parent, engine, oclass, &priv);
215 *pobject = nv_object(priv);
216 if (ret)
217 return ret;
218
219 if (priv->base.action & MXM_SANITISE_DCB)
220 mxm_dcb_sanitise(&priv->base);
221 return 0;
222}
223
224struct nouveau_oclass
225nv50_mxm_oclass = {
226 .handle = NV_SUBDEV(MXM, 0x50),
227 .ofuncs = &(struct nouveau_ofuncs) {
228 .ctor = nv50_mxm_ctor,
229 .dtor = _nouveau_mxm_dtor,
230 .init = _nouveau_mxm_init,
231 .fini = _nouveau_mxm_fini,
232 },
233};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
new file mode 100644
index 000000000000..1674c74a76c8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -0,0 +1,144 @@
1/*
2 * Copyright 2012 The Nouveau community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <core/object.h>
26#include <core/device.h>
27
28#include <subdev/bios.h>
29
30#include "priv.h"
31
32int
33nouveau_therm_attr_get(struct nouveau_therm *therm,
34 enum nouveau_therm_attr_type type)
35{
36 struct nouveau_therm_priv *priv = (void *)therm;
37
38 switch (type) {
39 case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
40 return priv->bios_fan.min_duty;
41 case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
42 return priv->bios_fan.max_duty;
43 case NOUVEAU_THERM_ATTR_FAN_MODE:
44 return priv->fan.mode;
45 case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
46 return priv->bios_sensor.thrs_fan_boost.temp;
47 case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
48 return priv->bios_sensor.thrs_fan_boost.hysteresis;
49 case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK:
50 return priv->bios_sensor.thrs_down_clock.temp;
51 case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST:
52 return priv->bios_sensor.thrs_down_clock.hysteresis;
53 case NOUVEAU_THERM_ATTR_THRS_CRITICAL:
54 return priv->bios_sensor.thrs_critical.temp;
55 case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST:
56 return priv->bios_sensor.thrs_critical.hysteresis;
57 case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN:
58 return priv->bios_sensor.thrs_shutdown.temp;
59 case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST:
60 return priv->bios_sensor.thrs_shutdown.hysteresis;
61 }
62
63 return -EINVAL;
64}
65
66int
67nouveau_therm_attr_set(struct nouveau_therm *therm,
68 enum nouveau_therm_attr_type type, int value)
69{
70 struct nouveau_therm_priv *priv = (void *)therm;
71
72 switch (type) {
73 case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
74 if (value < 0)
75 value = 0;
76 if (value > priv->bios_fan.max_duty)
77 value = priv->bios_fan.max_duty;
78 priv->bios_fan.min_duty = value;
79 return 0;
80 case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
81 if (value < 0)
82 value = 0;
83 if (value < priv->bios_fan.min_duty)
84 value = priv->bios_fan.min_duty;
85 priv->bios_fan.max_duty = value;
86 return 0;
87 case NOUVEAU_THERM_ATTR_FAN_MODE:
88 return nouveau_therm_fan_set_mode(therm, value);
89 case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
90 priv->bios_sensor.thrs_fan_boost.temp = value;
91 return 0;
92 case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
93 priv->bios_sensor.thrs_fan_boost.hysteresis = value;
94 return 0;
95 case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK:
96 priv->bios_sensor.thrs_down_clock.temp = value;
97 return 0;
98 case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST:
99 priv->bios_sensor.thrs_down_clock.hysteresis = value;
100 return 0;
101 case NOUVEAU_THERM_ATTR_THRS_CRITICAL:
102 priv->bios_sensor.thrs_critical.temp = value;
103 return 0;
104 case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST:
105 priv->bios_sensor.thrs_critical.hysteresis = value;
106 return 0;
107 case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN:
108 priv->bios_sensor.thrs_shutdown.temp = value;
109 return 0;
110 case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST:
111 priv->bios_sensor.thrs_shutdown.hysteresis = value;
112 return 0;
113 }
114
115 return -EINVAL;
116}
117
118int
119nouveau_therm_init(struct nouveau_object *object)
120{
121 struct nouveau_therm *therm = (void *)object;
122 struct nouveau_therm_priv *priv = (void *)therm;
123 int ret;
124
125 ret = nouveau_subdev_init(&therm->base);
126 if (ret)
127 return ret;
128
129 if (priv->fan.percent >= 0)
130 therm->fan_set(therm, priv->fan.percent);
131
132 return 0;
133}
134
135int
136nouveau_therm_fini(struct nouveau_object *object, bool suspend)
137{
138 struct nouveau_therm *therm = (void *)object;
139 struct nouveau_therm_priv *priv = (void *)therm;
140
141 priv->fan.percent = therm->fan_get(therm);
142
143 return nouveau_subdev_fini(&therm->base, suspend);
144}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
new file mode 100644
index 000000000000..b29237970fa0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -0,0 +1,234 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 * Martin Peres
24 */
25
26#include "priv.h"
27
28#include <core/object.h>
29#include <core/device.h>
30#include <subdev/gpio.h>
31#include <subdev/timer.h>
32
33int
34nouveau_therm_fan_get(struct nouveau_therm *therm)
35{
36 struct nouveau_therm_priv *priv = (void *)therm;
37 struct nouveau_gpio *gpio = nouveau_gpio(therm);
38 struct dcb_gpio_func func;
39 int card_type = nv_device(therm)->card_type;
40 u32 divs, duty;
41 int ret;
42
43 if (!priv->fan.pwm_get)
44 return -ENODEV;
45
46 ret = gpio->find(gpio, 0, DCB_GPIO_PWM_FAN, 0xff, &func);
47 if (ret == 0) {
48 ret = priv->fan.pwm_get(therm, func.line, &divs, &duty);
49 if (ret == 0 && divs) {
50 divs = max(divs, duty);
51 if (card_type <= NV_40 || (func.log[0] & 1))
52 duty = divs - duty;
53 return (duty * 100) / divs;
54 }
55
56 return gpio->get(gpio, 0, func.func, func.line) * 100;
57 }
58
59 return -ENODEV;
60}
61
62int
63nouveau_therm_fan_set(struct nouveau_therm *therm, int percent)
64{
65 struct nouveau_therm_priv *priv = (void *)therm;
66 struct nouveau_gpio *gpio = nouveau_gpio(therm);
67 struct dcb_gpio_func func;
68 int card_type = nv_device(therm)->card_type;
69 u32 divs, duty;
70 int ret;
71
72 if (priv->fan.mode == FAN_CONTROL_NONE)
73 return -EINVAL;
74
75 if (!priv->fan.pwm_set)
76 return -ENODEV;
77
78 if (percent < priv->bios_fan.min_duty)
79 percent = priv->bios_fan.min_duty;
80 if (percent > priv->bios_fan.max_duty)
81 percent = priv->bios_fan.max_duty;
82
83 ret = gpio->find(gpio, 0, DCB_GPIO_PWM_FAN, 0xff, &func);
84 if (ret == 0) {
85 divs = priv->bios_perf_fan.pwm_divisor;
86 if (priv->bios_fan.pwm_freq) {
87 divs = 1;
88 if (priv->fan.pwm_clock)
89 divs = priv->fan.pwm_clock(therm);
90 divs /= priv->bios_fan.pwm_freq;
91 }
92
93 duty = ((divs * percent) + 99) / 100;
94 if (card_type <= NV_40 || (func.log[0] & 1))
95 duty = divs - duty;
96
97 ret = priv->fan.pwm_set(therm, func.line, divs, duty);
98 return ret;
99 }
100
101 return -ENODEV;
102}
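Editor's note: nouveau_therm_fan_set() turns a percentage into hardware terms: divs is the PWM period in source-clock ticks (the perf-table divisor, or pwm_clock / pwm_freq when the thermal table gives a target frequency), duty is the rounded-up share of that period, and on <=NV40 cards or active-low GPIOs the duty is inverted. The arithmetic, with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pwm_clock = 1350000; /* hypothetical source clock, Hz */
	uint32_t pwm_freq = 1000;     /* target PWM frequency from the vbios */
	int percent = 40;
	int active_low = 1;           /* func.log[0] & 1 in the real code */

	uint32_t divs = pwm_clock / pwm_freq;          /* period in ticks */
	uint32_t duty = ((divs * percent) + 99) / 100; /* round up */

	if (active_low)
		duty = divs - duty; /* same inversion as the code above */

	printf("divs=%u duty=%u\n", divs, duty); /* divs=1350 duty=810 */
	return 0;
}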
103
104int
105nouveau_therm_fan_sense(struct nouveau_therm *therm)
106{
107 struct nouveau_timer *ptimer = nouveau_timer(therm);
108 struct nouveau_gpio *gpio = nouveau_gpio(therm);
109 struct dcb_gpio_func func;
110 u32 cycles, cur, prev;
111 u64 start, end, tach;
112
113 if (gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &func))
114 return -ENODEV;
115
116 /* Time a complete rotation and extrapolate to RPM:
117 * When the fan spins, it changes the value of GPIO FAN_SENSE.
118 * We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation.
119 */
120 start = ptimer->read(ptimer);
121 prev = gpio->get(gpio, 0, func.func, func.line);
122 cycles = 0;
123 do {
124 usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
125
126 cur = gpio->get(gpio, 0, func.func, func.line);
127 if (prev != cur) {
128 if (!start)
129 start = ptimer->read(ptimer);
130 cycles++;
131 prev = cur;
132 }
133 } while (cycles < 5 && ptimer->read(ptimer) - start < 250000000);
134 end = ptimer->read(ptimer);
135
136 if (cycles == 5) {
137 tach = (u64)60000000000;
138 do_div(tach, (end - start));
139 return tach;
140 } else
141 return 0;
142}
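Editor's note: the sense loop above counts five edges on the FAN_SENSE line; at four edges per rotation that is four intervals, i.e. one complete turn, so the RPM is simply nanoseconds-per-minute divided by the measured span. Worked with a hypothetical 20 ms rotation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0;          /* hypothetical ptimer reads, in ns */
	uint64_t end = 20000000;     /* 20 ms for one full rotation */
	uint64_t tach = 60000000000ULL / (end - start);

	printf("%llu rpm\n", (unsigned long long)tach); /* 3000 rpm */
	return 0;
}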
143
144int
145nouveau_therm_fan_set_mode(struct nouveau_therm *therm,
146 enum nouveau_therm_fan_mode mode)
147{
148 struct nouveau_therm_priv *priv = (void *)therm;
149
150 if (priv->fan.mode == mode)
151 return 0;
152
153 if (mode < FAN_CONTROL_NONE || mode >= FAN_CONTROL_NR)
154 return -EINVAL;
155
156 switch (mode)
157 {
158 case FAN_CONTROL_NONE:
159 nv_info(therm, "switch fan to no-control mode\n");
160 break;
161 case FAN_CONTROL_MANUAL:
162 nv_info(therm, "switch fan to manual mode\n");
163 break;
164 case FAN_CONTROL_NR:
165 break;
166 }
167
168 priv->fan.mode = mode;
169 return 0;
170}
171
172int
173nouveau_therm_fan_user_get(struct nouveau_therm *therm)
174{
175 return nouveau_therm_fan_get(therm);
176}
177
178int
179nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent)
180{
181 struct nouveau_therm_priv *priv = (void *)therm;
182
183 if (priv->fan.mode != FAN_CONTROL_MANUAL)
184 return -EINVAL;
185
186 return nouveau_therm_fan_set(therm, percent);
187}
188
189void
190nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
191{
192 struct nouveau_therm_priv *priv = (void *)therm;
193
194 priv->bios_fan.pwm_freq = 0;
195 priv->bios_fan.min_duty = 0;
196 priv->bios_fan.max_duty = 100;
197}
198
199
200static void
201nouveau_therm_fan_safety_checks(struct nouveau_therm *therm)
202{
203 struct nouveau_therm_priv *priv = (void *)therm;
204
205 if (priv->bios_fan.min_duty > 100)
206 priv->bios_fan.min_duty = 100;
207 if (priv->bios_fan.max_duty > 100)
208 priv->bios_fan.max_duty = 100;
209
210 if (priv->bios_fan.min_duty > priv->bios_fan.max_duty)
211 priv->bios_fan.min_duty = priv->bios_fan.max_duty;
212}
213
214int nouveau_fan_pwm_clock_dummy(struct nouveau_therm *therm)
215{
216 return 1;
217}
218
219int
220nouveau_therm_fan_ctor(struct nouveau_therm *therm)
221{
222 struct nouveau_therm_priv *priv = (void *)therm;
223 struct nouveau_bios *bios = nouveau_bios(therm);
224
225 nouveau_therm_fan_set_defaults(therm);
226 nvbios_perf_fan_parse(bios, &priv->bios_perf_fan);
227 if (nvbios_therm_fan_parse(bios, &priv->bios_fan))
228 nv_error(therm, "parsing the thermal table failed\n");
229 nouveau_therm_fan_safety_checks(therm);
230
231 nouveau_therm_fan_set_mode(therm, FAN_CONTROL_NONE);
232
233 return 0;
234}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
new file mode 100644
index 000000000000..e512ff0aae60
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -0,0 +1,116 @@
1/*
2 * Copyright 2012 Nouveau community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include "priv.h"
26
27#include <subdev/i2c.h>
28#include <subdev/bios/extdev.h>
29
30static bool
31probe_monitoring_device(struct nouveau_i2c_port *i2c,
32 struct i2c_board_info *info)
33{
34 struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c->i2c);
35 struct i2c_client *client;
36
37 request_module("%s%s", I2C_MODULE_PREFIX, info->type);
38
39 client = i2c_new_device(&i2c->adapter, info);
40 if (!client)
41 return false;
42
43 if (!client->driver || client->driver->detect(client, info)) {
44 i2c_unregister_device(client);
45 return false;
46 }
47
48 nv_info(priv,
49 "Found an %s at address 0x%x (controlled by lm_sensors)\n",
50 info->type, info->addr);
51 priv->ic = client;
52
53 return true;
54}
55
56void
57nouveau_therm_ic_ctor(struct nouveau_therm *therm)
58{
59 struct nouveau_therm_priv *priv = (void *)therm;
60 struct nouveau_bios *bios = nouveau_bios(therm);
61 struct nouveau_i2c *i2c = nouveau_i2c(therm);
62 struct nvbios_extdev_func extdev_entry;
63 struct i2c_board_info info[] = {
64 { I2C_BOARD_INFO("w83l785ts", 0x2d) },
65 { I2C_BOARD_INFO("w83781d", 0x2d) },
66 { I2C_BOARD_INFO("adt7473", 0x2e) },
67 { I2C_BOARD_INFO("adt7473", 0x2d) },
68 { I2C_BOARD_INFO("adt7473", 0x2c) },
69 { I2C_BOARD_INFO("f75375", 0x2e) },
70 { I2C_BOARD_INFO("lm99", 0x4c) },
71 { I2C_BOARD_INFO("lm90", 0x4c) },
72 { I2C_BOARD_INFO("lm90", 0x4d) },
73 { I2C_BOARD_INFO("adm1021", 0x18) },
74 { I2C_BOARD_INFO("adm1021", 0x19) },
75 { I2C_BOARD_INFO("adm1021", 0x1a) },
76 { I2C_BOARD_INFO("adm1021", 0x29) },
77 { I2C_BOARD_INFO("adm1021", 0x2a) },
78 { I2C_BOARD_INFO("adm1021", 0x2b) },
79 { I2C_BOARD_INFO("adm1021", 0x4c) },
80 { I2C_BOARD_INFO("adm1021", 0x4d) },
81 { I2C_BOARD_INFO("adm1021", 0x4e) },
82 { I2C_BOARD_INFO("lm63", 0x18) },
83 { I2C_BOARD_INFO("lm63", 0x4e) },
84 { }
85 };
86
87 if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
88 struct i2c_board_info board[] = {
89 { I2C_BOARD_INFO("lm90", extdev_entry.addr >> 1) },
90 { }
91 };
92
93 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
94 board, probe_monitoring_device);
95 if (priv->ic)
96 return;
97 }
98
99 if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_ADT7473, &extdev_entry)) {
100 struct i2c_board_info board[] = {
101 { I2C_BOARD_INFO("adt7473", extdev_entry.addr >> 1) },
102 { }
103 };
104
105 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
106 board, probe_monitoring_device);
107 if (priv->ic)
108 return;
109 }
110
111	/* The vbios doesn't provide the address of an existing monitoring
112	 * device. Let's try our static list.
113	 */
114 i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", info,
115 probe_monitoring_device);
116}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
new file mode 100644
index 000000000000..fcf2cfe731d6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
@@ -0,0 +1,163 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 * Martin Peres
24 */
25
26#include "priv.h"
27
28static int
29nv40_sensor_setup(struct nouveau_therm *therm)
30{
31 struct nouveau_device *device = nv_device(therm);
32
33 /* enable ADC readout and disable the ALARM threshold */
34 if (device->chipset >= 0x46) {
35 nv_mask(therm, 0x15b8, 0x80000000, 0);
36 nv_wr32(therm, 0x15b0, 0x80003fff);
37 return nv_rd32(therm, 0x15b4) & 0x3fff;
38 } else {
39 nv_wr32(therm, 0x15b0, 0xff);
40 return nv_rd32(therm, 0x15b4) & 0xff;
41 }
42}
43
44static int
45nv40_temp_get(struct nouveau_therm *therm)
46{
47 struct nouveau_therm_priv *priv = (void *)therm;
48 struct nouveau_device *device = nv_device(therm);
49 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
50 int core_temp;
51
52 if (device->chipset >= 0x46) {
53 nv_wr32(therm, 0x15b0, 0x80003fff);
54 core_temp = nv_rd32(therm, 0x15b4) & 0x3fff;
55 } else {
56 nv_wr32(therm, 0x15b0, 0xff);
57 core_temp = nv_rd32(therm, 0x15b4) & 0xff;
58 }
59
60 /* Setup the sensor if the temperature is 0 */
61 if (core_temp == 0)
62 core_temp = nv40_sensor_setup(therm);
63
64 if (sensor->slope_div == 0)
65 sensor->slope_div = 1;
66 if (sensor->offset_den == 0)
67 sensor->offset_den = 1;
68 if (sensor->slope_mult < 1)
69 sensor->slope_mult = 1;
70
71 core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
72 core_temp = core_temp + sensor->offset_num / sensor->offset_den;
73 core_temp = core_temp + sensor->offset_constant - 8;
74
75 return core_temp;
76}
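Editor's note: nv40_temp_get() maps the raw ADC reading to degrees with a linear calibration taken from the vbios sensor table, then subtracts a fixed 8. A worked example with hypothetical calibration values (the real ones are board-specific):

#include <stdio.h>

int main(void)
{
	/* hypothetical calibration; real values come from the vbios
	 * sensor table */
	int raw = 2200, slope_mult = 1, slope_div = 40;
	int offset_num = 0, offset_den = 1, offset_constant = 10;

	int temp = raw * slope_mult / slope_div;
	temp = temp + offset_num / offset_den;
	temp = temp + offset_constant - 8; /* same -8 fixup as nv40_temp_get */

	printf("%d degC\n", temp);         /* 57 */
	return 0;
}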
77
78int
79nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
80{
81 if (line == 2) {
82 u32 reg = nv_rd32(therm, 0x0010f0);
83 if (reg & 0x80000000) {
84 *duty = (reg & 0x7fff0000) >> 16;
85 *divs = (reg & 0x00007fff);
86 return 0;
87 }
88 } else
89 if (line == 9) {
90 u32 reg = nv_rd32(therm, 0x0015f4);
91 if (reg & 0x80000000) {
92 *divs = nv_rd32(therm, 0x0015f8);
93 *duty = (reg & 0x7fffffff);
94 return 0;
95 }
96 } else {
97 nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
98 return -ENODEV;
99 }
100
101 return -EINVAL;
102}
103
104int
105nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
106{
107 if (line == 2) {
108 nv_wr32(therm, 0x0010f0, 0x80000000 | (duty << 16) | divs);
109 } else
110 if (line == 9) {
111 nv_wr32(therm, 0x0015f8, divs);
112 nv_wr32(therm, 0x0015f4, duty | 0x80000000);
113 } else {
114 nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
115 return -ENODEV;
116 }
117
118 return 0;
119}
120
121static int
122nv40_therm_ctor(struct nouveau_object *parent,
123 struct nouveau_object *engine,
124 struct nouveau_oclass *oclass, void *data, u32 size,
125 struct nouveau_object **pobject)
126{
127 struct nouveau_therm_priv *priv;
128 struct nouveau_therm *therm;
129 int ret;
130
131 ret = nouveau_therm_create(parent, engine, oclass, &priv);
132 *pobject = nv_object(priv);
133 therm = (void *) priv;
134 if (ret)
135 return ret;
136
137 nouveau_therm_ic_ctor(therm);
138 nouveau_therm_sensor_ctor(therm);
139 nouveau_therm_fan_ctor(therm);
140
141 priv->fan.pwm_get = nv40_fan_pwm_get;
142 priv->fan.pwm_set = nv40_fan_pwm_set;
143
144 therm->temp_get = nv40_temp_get;
145 therm->fan_get = nouveau_therm_fan_user_get;
146 therm->fan_set = nouveau_therm_fan_user_set;
147 therm->fan_sense = nouveau_therm_fan_sense;
148 therm->attr_get = nouveau_therm_attr_get;
149 therm->attr_set = nouveau_therm_attr_set;
150
151 return 0;
152}
153
154struct nouveau_oclass
155nv40_therm_oclass = {
156 .handle = NV_SUBDEV(THERM, 0x40),
157 .ofuncs = &(struct nouveau_ofuncs) {
158 .ctor = nv40_therm_ctor,
159 .dtor = _nouveau_therm_dtor,
160 .init = nouveau_therm_init,
161 .fini = nouveau_therm_fini,
162 },
163};
\ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
new file mode 100644
index 000000000000..f87a7a3eb4e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
@@ -0,0 +1,157 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 * Martin Peres
24 */
25
26#include "priv.h"
27
28static int
29pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx)
30{
31 if (*line == 0x04) {
32 *ctrl = 0x00e100;
33 *line = 4;
34 *indx = 0;
35 } else
36 if (*line == 0x09) {
37 *ctrl = 0x00e100;
38 *line = 9;
39 *indx = 1;
40 } else
41 if (*line == 0x10) {
42 *ctrl = 0x00e28c;
43 *line = 0;
44 *indx = 0;
45 } else {
46 nv_error(therm, "unknown pwm ctrl for gpio %d\n", *line);
47 return -ENODEV;
48 }
49
50 return 0;
51}
52
53int
54nv50_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
55{
56 int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
57 if (ret)
58 return ret;
59
60 if (nv_rd32(therm, ctrl) & (1 << line)) {
61 *divs = nv_rd32(therm, 0x00e114 + (id * 8));
62 *duty = nv_rd32(therm, 0x00e118 + (id * 8));
63 return 0;
64 }
65
66 return -EINVAL;
67}
68
69int
70nv50_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
71{
72 int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
73 if (ret)
74 return ret;
75
76 nv_mask(therm, ctrl, 0x00010001 << line, 0x00000001 << line);
77 nv_wr32(therm, 0x00e114 + (id * 8), divs);
78 nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000);
79 return 0;
80}
81
82int
83nv50_fan_pwm_clock(struct nouveau_therm *therm)
84{
85 int chipset = nv_device(therm)->chipset;
86 int crystal = nv_device(therm)->crystal;
87 int pwm_clock;
88
89 /* determine the PWM source clock */
90 if (chipset > 0x50 && chipset < 0x94) {
91 u8 pwm_div = nv_rd32(therm, 0x410c);
92 if (nv_rd32(therm, 0xc040) & 0x800000) {
93			/* Use the HOST clock (100 MHz).
94			 * Where does this constant (2.4) come from? */
95 pwm_clock = (100000000 >> pwm_div) / 10 / 24;
96 } else {
97			/* Where does this constant (20) come from? */
98 pwm_clock = (crystal * 1000) >> pwm_div;
99 pwm_clock /= 20;
100 }
101 } else {
102 pwm_clock = (crystal * 1000) / 20;
103 }
104
105 return pwm_clock;
106}
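Editor's note: outside the 0x50..0x94 window, the PWM source clock is simply the crystal frequency (kept in kHz) divided by the empirical constant 20 the comment above asks about. For a common 27 MHz crystal:

#include <stdio.h>

int main(void)
{
	int crystal = 27000;                   /* 27 MHz crystal, in kHz */
	int pwm_clock = (crystal * 1000) / 20; /* the general-case path above */

	printf("%d Hz\n", pwm_clock);          /* 1350000 */
	return 0;
}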
107
108int
109nv50_temp_get(struct nouveau_therm *therm)
110{
111 return nv_rd32(therm, 0x20400);
112}
113
114static int
115nv50_therm_ctor(struct nouveau_object *parent,
116 struct nouveau_object *engine,
117 struct nouveau_oclass *oclass, void *data, u32 size,
118 struct nouveau_object **pobject)
119{
120 struct nouveau_therm_priv *priv;
121 struct nouveau_therm *therm;
122 int ret;
123
124 ret = nouveau_therm_create(parent, engine, oclass, &priv);
125 *pobject = nv_object(priv);
126 therm = (void *) priv;
127 if (ret)
128 return ret;
129
130 nouveau_therm_ic_ctor(therm);
131 nouveau_therm_sensor_ctor(therm);
132 nouveau_therm_fan_ctor(therm);
133
134 priv->fan.pwm_get = nv50_fan_pwm_get;
135 priv->fan.pwm_set = nv50_fan_pwm_set;
136 priv->fan.pwm_clock = nv50_fan_pwm_clock;
137
138 therm->temp_get = nv50_temp_get;
139 therm->fan_get = nouveau_therm_fan_user_get;
140 therm->fan_set = nouveau_therm_fan_user_set;
141 therm->fan_sense = nouveau_therm_fan_sense;
142 therm->attr_get = nouveau_therm_attr_get;
143 therm->attr_set = nouveau_therm_attr_set;
144
145 return 0;
146}
147
148struct nouveau_oclass
149nv50_therm_oclass = {
150 .handle = NV_SUBDEV(THERM, 0x50),
151 .ofuncs = &(struct nouveau_ofuncs) {
152 .ctor = nv50_therm_ctor,
153 .dtor = _nouveau_therm_dtor,
154 .init = nouveau_therm_init,
155 .fini = nouveau_therm_fini,
156 },
157};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
new file mode 100644
index 000000000000..1c3cd6abc36e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -0,0 +1,73 @@
1/*
2 * Copyright 2012 The Nouveau community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <subdev/therm.h>
26
27#include <subdev/bios/extdev.h>
28#include <subdev/bios/perf.h>
29#include <subdev/bios/therm.h>
30
31struct nouveau_therm_priv {
32 struct nouveau_therm base;
33
34 /* bios */
35 struct nvbios_therm_sensor bios_sensor;
36 struct nvbios_therm_fan bios_fan;
37 struct nvbios_perf_fan bios_perf_fan;
38
39 /* fan priv */
40 struct {
41 enum nouveau_therm_fan_mode mode;
42 int percent;
43
44 int (*pwm_get)(struct nouveau_therm *, int line, u32*, u32*);
45 int (*pwm_set)(struct nouveau_therm *, int line, u32, u32);
46 int (*pwm_clock)(struct nouveau_therm *);
47 } fan;
48
49 /* ic */
50 struct i2c_client *ic;
51};
52
53int nouveau_therm_init(struct nouveau_object *object);
54int nouveau_therm_fini(struct nouveau_object *object, bool suspend);
55int nouveau_therm_attr_get(struct nouveau_therm *therm,
56 enum nouveau_therm_attr_type type);
57int nouveau_therm_attr_set(struct nouveau_therm *therm,
58 enum nouveau_therm_attr_type type, int value);
59
60void nouveau_therm_ic_ctor(struct nouveau_therm *therm);
61
62int nouveau_therm_sensor_ctor(struct nouveau_therm *therm);
63
64int nouveau_therm_fan_ctor(struct nouveau_therm *therm);
65int nouveau_therm_fan_get(struct nouveau_therm *therm);
66int nouveau_therm_fan_set(struct nouveau_therm *therm, int percent);
67int nouveau_therm_fan_user_get(struct nouveau_therm *therm);
68int nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent);
69int nouveau_therm_fan_set_mode(struct nouveau_therm *therm,
70 enum nouveau_therm_fan_mode mode);
71
72
73int nouveau_therm_fan_sense(struct nouveau_therm *therm);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
new file mode 100644
index 000000000000..204282301fb1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -0,0 +1,81 @@
1/*
2 * Copyright 2012 The Nouveau community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include "priv.h"
26
27#include <core/object.h>
28#include <core/device.h>
29
30#include <subdev/bios.h>
31
32static void
33nouveau_therm_temp_set_defaults(struct nouveau_therm *therm)
34{
35 struct nouveau_therm_priv *priv = (void *)therm;
36
37 priv->bios_sensor.slope_mult = 1;
38 priv->bios_sensor.slope_div = 1;
39 priv->bios_sensor.offset_num = 0;
40 priv->bios_sensor.offset_den = 1;
41 priv->bios_sensor.offset_constant = 0;
42
43 priv->bios_sensor.thrs_fan_boost.temp = 90;
44 priv->bios_sensor.thrs_fan_boost.hysteresis = 3;
45
46 priv->bios_sensor.thrs_down_clock.temp = 95;
47 priv->bios_sensor.thrs_down_clock.hysteresis = 3;
48
49 priv->bios_sensor.thrs_critical.temp = 105;
50 priv->bios_sensor.thrs_critical.hysteresis = 5;
51
52 priv->bios_sensor.thrs_shutdown.temp = 135;
53	priv->bios_sensor.thrs_shutdown.hysteresis = 5; /* not that it matters */
54}
55
56
57static void
58nouveau_therm_temp_safety_checks(struct nouveau_therm *therm)
59{
60 struct nouveau_therm_priv *priv = (void *)therm;
61
62 if (!priv->bios_sensor.slope_div)
63 priv->bios_sensor.slope_div = 1;
64 if (!priv->bios_sensor.offset_den)
65 priv->bios_sensor.offset_den = 1;
66}
67
68int
69nouveau_therm_sensor_ctor(struct nouveau_therm *therm)
70{
71 struct nouveau_therm_priv *priv = (void *)therm;
72 struct nouveau_bios *bios = nouveau_bios(therm);
73
74 nouveau_therm_temp_set_defaults(therm);
75 if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
76 &priv->bios_sensor))
77 nv_error(therm, "nvbios_therm_sensor_parse failed\n");
78 nouveau_therm_temp_safety_checks(therm);
79
80 return 0;
81}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/base.c b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
new file mode 100644
index 000000000000..5d417cc9949b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
@@ -0,0 +1,87 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/timer.h>
26
27bool
28nouveau_timer_wait_eq(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
29{
30 struct nouveau_timer *ptimer = nouveau_timer(obj);
31 u64 time0;
32
33 time0 = ptimer->read(ptimer);
34 do {
35 if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
36 if ((nv_rd32(obj, addr) & mask) == data)
37 return true;
38 } else {
39 if ((nv_ro32(obj, addr) & mask) == data)
40 return true;
41 }
42 } while (ptimer->read(ptimer) - time0 < nsec);
43
44 return false;
45}
46
47bool
48nouveau_timer_wait_ne(void *obj, u64 nsec, u32 addr, u32 mask, u32 data)
49{
50 struct nouveau_timer *ptimer = nouveau_timer(obj);
51 u64 time0;
52
53 time0 = ptimer->read(ptimer);
54 do {
55 if (nv_iclass(obj, NV_SUBDEV_CLASS)) {
56 if ((nv_rd32(obj, addr) & mask) != data)
57 return true;
58 } else {
59 if ((nv_ro32(obj, addr) & mask) != data)
60 return true;
61 }
62 } while (ptimer->read(ptimer) - time0 < nsec);
63
64 return false;
65}
66
67bool
68nouveau_timer_wait_cb(void *obj, u64 nsec, bool (*func)(void *), void *data)
69{
70 struct nouveau_timer *ptimer = nouveau_timer(obj);
71 u64 time0;
72
73 time0 = ptimer->read(ptimer);
74 do {
75		if (func(data))
76 return true;
77 } while (ptimer->read(ptimer) - time0 < nsec);
78
79 return false;
80}
81
82void
83nouveau_timer_alarm(void *obj, u32 nsec, struct nouveau_alarm *alarm)
84{
85 struct nouveau_timer *ptimer = nouveau_timer(obj);
86 ptimer->alarm(ptimer, nsec, alarm);
87}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
new file mode 100644
index 000000000000..49976be4d73b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -0,0 +1,249 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/timer.h>
26
27#define NV04_PTIMER_INTR_0 0x009100
28#define NV04_PTIMER_INTR_EN_0 0x009140
29#define NV04_PTIMER_NUMERATOR 0x009200
30#define NV04_PTIMER_DENOMINATOR 0x009210
31#define NV04_PTIMER_TIME_0 0x009400
32#define NV04_PTIMER_TIME_1 0x009410
33#define NV04_PTIMER_ALARM_0 0x009420
34
35struct nv04_timer_priv {
36 struct nouveau_timer base;
37 struct list_head alarms;
38 spinlock_t lock;
39};
40
41static u64
42nv04_timer_read(struct nouveau_timer *ptimer)
43{
44 struct nv04_timer_priv *priv = (void *)ptimer;
45 u32 hi, lo;
46
47 do {
48 hi = nv_rd32(priv, NV04_PTIMER_TIME_1);
49 lo = nv_rd32(priv, NV04_PTIMER_TIME_0);
50 } while (hi != nv_rd32(priv, NV04_PTIMER_TIME_1));
51
52 return ((u64)hi << 32 | lo);
53}
54
55static void
56nv04_timer_alarm_trigger(struct nouveau_timer *ptimer)
57{
58 struct nv04_timer_priv *priv = (void *)ptimer;
59 struct nouveau_alarm *alarm, *atemp;
60 unsigned long flags;
61 LIST_HEAD(exec);
62
63 /* move any due alarms off the pending list */
64 spin_lock_irqsave(&priv->lock, flags);
65 list_for_each_entry_safe(alarm, atemp, &priv->alarms, head) {
66 if (alarm->timestamp <= ptimer->read(ptimer))
67 list_move_tail(&alarm->head, &exec);
68 }
69
70 /* reschedule interrupt for next alarm time */
71 if (!list_empty(&priv->alarms)) {
72 alarm = list_first_entry(&priv->alarms, typeof(*alarm), head);
73 nv_wr32(priv, NV04_PTIMER_ALARM_0, alarm->timestamp);
74 nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000001);
75 } else {
76 nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
77 }
78 spin_unlock_irqrestore(&priv->lock, flags);
79
80 /* execute any pending alarm handlers */
81 list_for_each_entry_safe(alarm, atemp, &exec, head) {
82 list_del(&alarm->head);
83 alarm->func(alarm);
84 }
85}
86
87static void
88nv04_timer_alarm(struct nouveau_timer *ptimer, u32 time,
89 struct nouveau_alarm *alarm)
90{
91 struct nv04_timer_priv *priv = (void *)ptimer;
92 struct nouveau_alarm *list;
93 unsigned long flags;
94
95 alarm->timestamp = ptimer->read(ptimer) + time;
96
97 /* append new alarm to list, in soonest-alarm-first order */
98 spin_lock_irqsave(&priv->lock, flags);
99 list_for_each_entry(list, &priv->alarms, head) {
100 if (list->timestamp > alarm->timestamp)
101 break;
102 }
103 list_add_tail(&alarm->head, &list->head);
104 spin_unlock_irqrestore(&priv->lock, flags);
105
106 /* process pending alarms */
107 nv04_timer_alarm_trigger(ptimer);
108}
109
110static void
111nv04_timer_intr(struct nouveau_subdev *subdev)
112{
113 struct nv04_timer_priv *priv = (void *)subdev;
114 u32 stat = nv_rd32(priv, NV04_PTIMER_INTR_0);
115
116 if (stat & 0x00000001) {
117 nv04_timer_alarm_trigger(&priv->base);
118 nv_wr32(priv, NV04_PTIMER_INTR_0, 0x00000001);
119 stat &= ~0x00000001;
120 }
121
122 if (stat) {
123 nv_error(priv, "unknown stat 0x%08x\n", stat);
124 nv_wr32(priv, NV04_PTIMER_INTR_0, stat);
125 }
126}
127
128static int
129nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
130 struct nouveau_oclass *oclass, void *data, u32 size,
131 struct nouveau_object **pobject)
132{
133 struct nv04_timer_priv *priv;
134 int ret;
135
136 ret = nouveau_timer_create(parent, engine, oclass, &priv);
137 *pobject = nv_object(priv);
138 if (ret)
139 return ret;
140
141 priv->base.base.intr = nv04_timer_intr;
142 priv->base.read = nv04_timer_read;
143 priv->base.alarm = nv04_timer_alarm;
144
145 INIT_LIST_HEAD(&priv->alarms);
146 spin_lock_init(&priv->lock);
147 return 0;
148}
149
150static void
151nv04_timer_dtor(struct nouveau_object *object)
152{
153 struct nv04_timer_priv *priv = (void *)object;
154 return nouveau_timer_destroy(&priv->base);
155}
156
157static int
158nv04_timer_init(struct nouveau_object *object)
159{
160 struct nouveau_device *device = nv_device(object);
161 struct nv04_timer_priv *priv = (void *)object;
162 u32 m = 1, f, n, d;
163 int ret;
164
165 ret = nouveau_timer_init(&priv->base);
166 if (ret)
167 return ret;
168
169 /* aim for 31.25MHz, which gives us nanosecond timestamps */
170 d = 1000000 / 32;
171
172 /* determine base clock for timer source */
173#if 0 /*XXX*/
174 if (device->chipset < 0x40) {
175 n = nouveau_hw_get_clock(device, PLL_CORE);
176 } else
177#endif
178 if (device->chipset <= 0x40) {
179 /*XXX: figure this out */
180 f = -1;
181 n = 0;
182 } else {
183 f = device->crystal;
184 n = f;
185 while (n < (d * 2)) {
186 n += (n / m);
187 m++;
188 }
189
190 nv_wr32(priv, 0x009220, m - 1);
191 }
192
193 if (!n) {
194 nv_warn(priv, "unknown input clock freq\n");
195 if (!nv_rd32(priv, NV04_PTIMER_NUMERATOR) ||
196 !nv_rd32(priv, NV04_PTIMER_DENOMINATOR)) {
197 nv_wr32(priv, NV04_PTIMER_NUMERATOR, 1);
198 nv_wr32(priv, NV04_PTIMER_DENOMINATOR, 1);
199 }
200 return 0;
201 }
202
203 /* reduce ratio to acceptable values */
204 while (((n % 5) == 0) && ((d % 5) == 0)) {
205 n /= 5;
206 d /= 5;
207 }
208
209 while (((n % 2) == 0) && ((d % 2) == 0)) {
210 n /= 2;
211 d /= 2;
212 }
213
214 while (n > 0xffff || d > 0xffff) {
215 n >>= 1;
216 d >>= 1;
217 }
218
219 nv_debug(priv, "input frequency : %dHz\n", f);
220 nv_debug(priv, "input multiplier: %d\n", m);
221 nv_debug(priv, "numerator : 0x%08x\n", n);
222 nv_debug(priv, "denominator : 0x%08x\n", d);
223 nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n);
224
225 nv_wr32(priv, NV04_PTIMER_NUMERATOR, n);
226 nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d);
227 nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff);
228 nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
229 return 0;
230}
231
232static int
233nv04_timer_fini(struct nouveau_object *object, bool suspend)
234{
235 struct nv04_timer_priv *priv = (void *)object;
236 nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
237 return nouveau_timer_fini(&priv->base, suspend);
238}
239
240struct nouveau_oclass
241nv04_timer_oclass = {
242 .handle = NV_SUBDEV(TIMER, 0x04),
243 .ofuncs = &(struct nouveau_ofuncs) {
244 .ctor = nv04_timer_ctor,
245 .dtor = nv04_timer_dtor,
246 .init = nv04_timer_init,
247 .fini = nv04_timer_fini,
248 }
249};
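
nv04_timer_read() above uses the classic split-counter idiom: TIME_0 and TIME_1 are two 32-bit halves that cannot be read atomically, so the high word is re-read until it is stable, which guarantees a consistent 64-bit sample even when the low word rolls over mid-read. The idiom in isolation (plain C; rd_hi/rd_lo stand in for the two register reads):

    #include <stdint.h>

    static uint64_t read_split_counter(uint32_t (*rd_hi)(void),
                                       uint32_t (*rd_lo)(void))
    {
        uint32_t hi, lo;
        do {
            hi = rd_hi();
            lo = rd_lo();
        } while (hi != rd_hi());    /* low word wrapped: retry */
        return ((uint64_t)hi << 32) | lo;
    }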
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 4c8d13965dd1..082c11b75acb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -22,22 +22,24 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nouveau_vm.h"
+#include <core/gpuobj.h>
+#include <core/mm.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
 
 void
 nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 {
 	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	struct nouveau_mm_node *r;
-	int big = vma->node->type != vm->spg_shift;
+	int big = vma->node->type != vmm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
-	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
-	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
-	u32 max = 1 << (vm->pgt_bits - bits);
+	u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
+	u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
+	u32 max = 1 << (vmm->pgt_bits - bits);
 	u32 end, len;
 
 	delta = 0;
@@ -53,7 +55,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 			end = max;
 		len = end - pte;
 
-		vm->map(vma, pgt, node, pte, len, phys, delta);
+		vmm->map(vma, pgt, node, pte, len, phys, delta);
 
 		num -= len;
 		pte += len;
@@ -67,7 +69,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
 		}
 	}
 
-	vm->flush(vm);
+	vmm->flush(vm);
 }
 
 void
@@ -81,13 +83,14 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 			struct nouveau_mem *mem)
 {
 	struct nouveau_vm *vm = vma->vm;
-	int big = vma->node->type != vm->spg_shift;
+	struct nouveau_vmmgr *vmm = vm->vmm;
+	int big = vma->node->type != vmm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num = length >> vma->node->type;
-	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
-	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
-	u32 max = 1 << (vm->pgt_bits - bits);
+	u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
+	u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
+	u32 max = 1 << (vmm->pgt_bits - bits);
 	unsigned m, sglen;
 	u32 end, len;
 	int i;
@@ -105,7 +108,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 		for (m = 0; m < len; m++) {
 			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
 
-			vm->map_sg(vma, pgt, mem, pte, 1, &addr);
+			vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
 			num--;
 			pte++;
 
@@ -120,7 +123,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 		for (; m < sglen; m++) {
 			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
 
-			vm->map_sg(vma, pgt, mem, pte, 1, &addr);
+			vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
 			num--;
 			pte++;
 			if (num == 0)
@@ -130,7 +133,7 @@ nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
 
 	}
 finish:
-	vm->flush(vm);
+	vmm->flush(vm);
 }
 
 void
@@ -138,14 +141,15 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 		  struct nouveau_mem *mem)
 {
 	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	dma_addr_t *list = mem->pages;
-	int big = vma->node->type != vm->spg_shift;
+	int big = vma->node->type != vmm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num = length >> vma->node->type;
-	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
-	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
-	u32 max = 1 << (vm->pgt_bits - bits);
+	u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
+	u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
+	u32 max = 1 << (vmm->pgt_bits - bits);
 	u32 end, len;
 
 	while (num) {
@@ -156,7 +160,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 			end = max;
 		len = end - pte;
 
-		vm->map_sg(vma, pgt, mem, pte, len, list);
+		vmm->map_sg(vma, pgt, mem, pte, len, list);
 
 		num -= len;
 		pte += len;
@@ -167,20 +171,21 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
 		}
 	}
 
-	vm->flush(vm);
+	vmm->flush(vm);
 }
 
 void
 nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 {
 	struct nouveau_vm *vm = vma->vm;
-	int big = vma->node->type != vm->spg_shift;
+	struct nouveau_vmmgr *vmm = vm->vmm;
+	int big = vma->node->type != vmm->spg_shift;
 	u32 offset = vma->node->offset + (delta >> 12);
 	u32 bits = vma->node->type - 12;
 	u32 num = length >> vma->node->type;
-	u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
-	u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
-	u32 max = 1 << (vm->pgt_bits - bits);
+	u32 pde = (offset >> vmm->pgt_bits) - vm->fpde;
+	u32 pte = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
+	u32 max = 1 << (vmm->pgt_bits - bits);
 	u32 end, len;
 
 	while (num) {
@@ -191,7 +196,7 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 			end = max;
 		len = end - pte;
 
-		vm->unmap(pgt, pte, len);
+		vmm->unmap(pgt, pte, len);
 
 		num -= len;
 		pte += len;
@@ -201,7 +206,7 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
 		}
 	}
 
-	vm->flush(vm);
+	vmm->flush(vm);
 }
 
 void
@@ -213,6 +218,7 @@ nouveau_vm_unmap(struct nouveau_vma *vma)
 static void
 nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 {
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	struct nouveau_vm_pgd *vpgd;
 	struct nouveau_vm_pgt *vpgt;
 	struct nouveau_gpuobj *pgt;
@@ -227,7 +233,7 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 		vpgt->obj[big] = NULL;
 
 		list_for_each_entry(vpgd, &vm->pgd_list, head) {
-			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+			vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
 		}
 
 		mutex_unlock(&vm->mm.mutex);
@@ -239,18 +245,19 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 static int
 nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
 {
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
 	struct nouveau_vm_pgd *vpgd;
 	struct nouveau_gpuobj *pgt;
-	int big = (type != vm->spg_shift);
+	int big = (type != vmm->spg_shift);
 	u32 pgt_size;
 	int ret;
 
-	pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
+	pgt_size = (1 << (vmm->pgt_bits + 12)) >> type;
 	pgt_size *= 8;
 
 	mutex_unlock(&vm->mm.mutex);
-	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
+	ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000,
 				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
 	mutex_lock(&vm->mm.mutex);
 	if (unlikely(ret))
@@ -266,7 +273,7 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
 
 	vpgt->obj[big] = pgt;
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+		vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
 	}
 
 	return 0;
@@ -276,23 +283,26 @@ int
 nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 	       u32 access, struct nouveau_vma *vma)
 {
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	u32 align = (1 << page_shift) >> 12;
 	u32 msize = size >> 12;
 	u32 fpde, lpde, pde;
 	int ret;
 
 	mutex_lock(&vm->mm.mutex);
-	ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
+	ret = nouveau_mm_head(&vm->mm, page_shift, msize, msize, align,
+			      &vma->node);
 	if (unlikely(ret != 0)) {
 		mutex_unlock(&vm->mm.mutex);
 		return ret;
 	}
 
-	fpde = (vma->node->offset >> vm->pgt_bits);
-	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+	fpde = (vma->node->offset >> vmm->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
+
 	for (pde = fpde; pde <= lpde; pde++) {
 		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
-		int big = (vma->node->type != vm->spg_shift);
+		int big = (vma->node->type != vmm->spg_shift);
 
 		if (likely(vpgt->refcount[big])) {
 			vpgt->refcount[big]++;
@@ -303,9 +313,8 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 		if (ret) {
 			if (pde != fpde)
 				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
-			nouveau_mm_put(&vm->mm, vma->node);
+			nouveau_mm_free(&vm->mm, &vma->node);
 			mutex_unlock(&vm->mm.mutex);
-			vma->node = NULL;
 			return ret;
 		}
 	}
@@ -321,91 +330,67 @@ void
 nouveau_vm_put(struct nouveau_vma *vma)
 {
 	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	u32 fpde, lpde;
 
 	if (unlikely(vma->node == NULL))
 		return;
-	fpde = (vma->node->offset >> vm->pgt_bits);
-	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+	fpde = (vma->node->offset >> vmm->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
 
 	mutex_lock(&vm->mm.mutex);
-	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
-	nouveau_mm_put(&vm->mm, vma->node);
-	vma->node = NULL;
+	nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde);
+	nouveau_mm_free(&vm->mm, &vma->node);
 	mutex_unlock(&vm->mm.mutex);
 }
 
 int
-nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
-	       struct nouveau_vm **pvm)
+nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
+		  u64 mm_offset, u32 block, struct nouveau_vm **pvm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_vm *vm;
 	u64 mm_length = (offset + length) - mm_offset;
-	u32 block, pgt_bits;
 	int ret;
 
-	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+	vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL);
 	if (!vm)
 		return -ENOMEM;
 
-	if (dev_priv->card_type == NV_50) {
-		vm->map_pgt = nv50_vm_map_pgt;
-		vm->map = nv50_vm_map;
-		vm->map_sg = nv50_vm_map_sg;
-		vm->unmap = nv50_vm_unmap;
-		vm->flush = nv50_vm_flush;
-		vm->spg_shift = 12;
-		vm->lpg_shift = 16;
-
-		pgt_bits = 29;
-		block = (1 << pgt_bits);
-		if (length < block)
-			block = length;
-
-	} else
-	if (dev_priv->card_type >= NV_C0) {
-		vm->map_pgt = nvc0_vm_map_pgt;
-		vm->map = nvc0_vm_map;
-		vm->map_sg = nvc0_vm_map_sg;
-		vm->unmap = nvc0_vm_unmap;
-		vm->flush = nvc0_vm_flush;
-		vm->spg_shift = 12;
-		vm->lpg_shift = 17;
-		pgt_bits = 27;
-		block = 4096;
-	} else {
-		kfree(vm);
-		return -ENOSYS;
-	}
+	INIT_LIST_HEAD(&vm->pgd_list);
+	vm->vmm = vmm;
+	vm->refcount = 1;
+	vm->fpde = offset >> (vmm->pgt_bits + 12);
+	vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
 
-	vm->fpde = offset >> pgt_bits;
-	vm->lpde = (offset + length - 1) >> pgt_bits;
-	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
+	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
 	if (!vm->pgt) {
 		kfree(vm);
 		return -ENOMEM;
 	}
 
-	INIT_LIST_HEAD(&vm->pgd_list);
-	vm->dev = dev;
-	vm->refcount = 1;
-	vm->pgt_bits = pgt_bits - 12;
-
 	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
 			      block >> 12);
 	if (ret) {
+		kfree(vm->pgt);
 		kfree(vm);
 		return ret;
 	}
 
-	*pvm = vm;
 	return 0;
 }
 
+int
+nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length,
+	       u64 mm_offset, struct nouveau_vm **pvm)
+{
+	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
+	return vmm->create(vmm, offset, length, mm_offset, pvm);
+}
+
 static int
 nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
 {
+	struct nouveau_vmmgr *vmm = vm->vmm;
 	struct nouveau_vm_pgd *vpgd;
 	int i;
 
@@ -420,7 +405,7 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
 
 	mutex_lock(&vm->mm.mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
-		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+		vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
 	list_add(&vpgd->head, &vm->pgd_list);
 	mutex_unlock(&vm->mm.mutex);
 	return 0;
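
Every map/unmap path in this file decomposes a VM offset the same way: shift away the 4KiB page size, then split the resulting page index into a page-directory index (upper bits, one page table per 1 << pgt_bits pages) and a page-table index (lower bits, shifted again when large pages are in use). A worked sketch of just that arithmetic (plain C; the parameter names follow the code above, but the function itself is illustrative):

    #include <stdint.h>

    /* page_type: page shift of the mapping (12 = 4KiB small pages);
     * pgt_bits: log2 of the 4KiB pages covered by one page table. */
    static void vm_decompose(uint64_t offset, uint32_t page_type,
                             uint32_t pgt_bits, uint32_t fpde,
                             uint32_t *pde, uint32_t *pte)
    {
        uint32_t page = offset >> 12;       /* 4KiB page index */
        uint32_t bits = page_type - 12;     /* extra shift for big pages */

        *pde = (page >> pgt_bits) - fpde;
        *pte = (page & ((1 << pgt_bits) - 1)) >> bits;
    }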
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
new file mode 100644
index 000000000000..6adbbc9cc361
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
@@ -0,0 +1,151 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26
27#include "nv04.h"
28
29#define NV04_PDMA_SIZE (128 * 1024 * 1024)
30#define NV04_PDMA_PAGE ( 4 * 1024)
31
32/*******************************************************************************
33 * VM map/unmap callbacks
34 ******************************************************************************/
35
36static void
37nv04_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
38 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
39{
40 pte = 0x00008 + (pte * 4);
41 while (cnt) {
42 u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
43 u32 phys = (u32)*list++;
44 while (cnt && page--) {
45 nv_wo32(pgt, pte, phys | 3);
46 phys += NV04_PDMA_PAGE;
47 pte += 4;
48 cnt -= 1;
49 }
50 }
51}
52
53static void
54nv04_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
55{
56 pte = 0x00008 + (pte * 4);
57 while (cnt--) {
58 nv_wo32(pgt, pte, 0x00000000);
59 pte += 4;
60 }
61}
62
63static void
64nv04_vm_flush(struct nouveau_vm *vm)
65{
66}
67
68/*******************************************************************************
69 * VM object
70 ******************************************************************************/
71
72int
73nv04_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, u64 mmstart,
74 struct nouveau_vm **pvm)
75{
76 return -EINVAL;
77}
78
79/*******************************************************************************
80 * VMMGR subdev
81 ******************************************************************************/
82
83static int
84nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
85 struct nouveau_oclass *oclass, void *data, u32 size,
86 struct nouveau_object **pobject)
87{
88 struct nv04_vmmgr_priv *priv;
89 struct nouveau_gpuobj *dma;
90 int ret;
91
92 ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIGART",
93 "pcigart", &priv);
94 *pobject = nv_object(priv);
95 if (ret)
96 return ret;
97
98 priv->base.create = nv04_vm_create;
99 priv->base.limit = NV04_PDMA_SIZE;
100 priv->base.dma_bits = 32;
101 priv->base.pgt_bits = 32 - 12;
102 priv->base.spg_shift = 12;
103 priv->base.lpg_shift = 12;
104 priv->base.map_sg = nv04_vm_map_sg;
105 priv->base.unmap = nv04_vm_unmap;
106 priv->base.flush = nv04_vm_flush;
107
108 ret = nouveau_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
109 &priv->vm);
110 if (ret)
111 return ret;
112
113 ret = nouveau_gpuobj_new(parent, NULL,
114 (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 +
115 8, 16, NVOBJ_FLAG_ZERO_ALLOC,
116 &priv->vm->pgt[0].obj[0]);
117 dma = priv->vm->pgt[0].obj[0];
118 priv->vm->pgt[0].refcount[0] = 1;
119 if (ret)
120 return ret;
121
122 nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
123 nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1);
124 return 0;
125}
126
127void
128nv04_vmmgr_dtor(struct nouveau_object *object)
129{
130 struct nv04_vmmgr_priv *priv = (void *)object;
131 if (priv->vm) {
132 nouveau_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
133 nouveau_vm_ref(NULL, &priv->vm, NULL);
134 }
135 if (priv->nullp) {
136 pci_free_consistent(nv_device(priv)->pdev, 16 * 1024,
137 priv->nullp, priv->null);
138 }
139 nouveau_vmmgr_destroy(&priv->base);
140}
141
142struct nouveau_oclass
143nv04_vmmgr_oclass = {
144 .handle = NV_SUBDEV(VM, 0x04),
145 .ofuncs = &(struct nouveau_ofuncs) {
146 .ctor = nv04_vmmgr_ctor,
147 .dtor = nv04_vmmgr_dtor,
148 .init = _nouveau_vmmgr_init,
149 .fini = _nouveau_vmmgr_fini,
150 },
151};
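
The PCIGART table constructed above is a flat DMA object: an 8-byte header (written with 0x0002103d and the aperture limit) followed by one 32-bit entry per 4KiB page, each holding the page's bus address OR'd with present/RW bits -- hence the "0x00008 + (pte * 4)" addressing in nv04_vm_map_sg(). The sizing arithmetic in isolation (plain C, illustrative only):

    #include <stdint.h>

    #define PDMA_SIZE (128u * 1024 * 1024)  /* as NV04_PDMA_SIZE */
    #define PDMA_PAGE (4u * 1024)           /* one entry per 4KiB page */

    /* byte offset of the entry for a given aperture page: skip the
     * 8-byte DMA-object header, then 4 bytes per entry */
    static uint32_t pte_offset(uint32_t page_index)
    {
        return 0x00008 + page_index * 4;
    }

    /* total object size, matching the nouveau_gpuobj_new() call above */
    static uint32_t gart_bytes(void)
    {
        return (PDMA_SIZE / PDMA_PAGE) * 4 + 8;
    }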
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
new file mode 100644
index 000000000000..ec42d4bc86a6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
@@ -0,0 +1,19 @@
1#ifndef __NV04_VMMGR_PRIV__
2#define __NV04_VMMGR_PRIV__
3
4#include <subdev/vm.h>
5
6struct nv04_vmmgr_priv {
7 struct nouveau_vmmgr base;
8 struct nouveau_vm *vm;
9 dma_addr_t null;
10 void *nullp;
11};
12
13static inline struct nv04_vmmgr_priv *
14nv04_vmmgr(void *obj)
15{
16 return (void *)nouveau_vmmgr(obj);
17}
18
19#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
new file mode 100644
index 000000000000..0203e1e12caa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -0,0 +1,158 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26#include <core/option.h>
27
28#include <subdev/timer.h>
29#include <subdev/vm.h>
30
31#include "nv04.h"
32
33#define NV41_GART_SIZE (512 * 1024 * 1024)
34#define NV41_GART_PAGE ( 4 * 1024)
35
36/*******************************************************************************
37 * VM map/unmap callbacks
38 ******************************************************************************/
39
40static void
41nv41_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
42 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
43{
44 pte = pte * 4;
45 while (cnt) {
46 u32 page = PAGE_SIZE / NV41_GART_PAGE;
47 u64 phys = (u64)*list++;
48 while (cnt && page--) {
49 nv_wo32(pgt, pte, (phys >> 7) | 1);
50 phys += NV41_GART_PAGE;
51 pte += 4;
52 cnt -= 1;
53 }
54 }
55}
56
57static void
58nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
59{
60 pte = pte * 4;
61 while (cnt--) {
62 nv_wo32(pgt, pte, 0x00000000);
63 pte += 4;
64 }
65}
66
67static void
68nv41_vm_flush(struct nouveau_vm *vm)
69{
70 struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
71
72 mutex_lock(&nv_subdev(priv)->mutex);
73 nv_wr32(priv, 0x100810, 0x00000022);
74 if (!nv_wait(priv, 0x100810, 0x00000020, 0x00000020)) {
75 nv_warn(priv, "flush timeout, 0x%08x\n",
76 nv_rd32(priv, 0x100810));
77 }
78 nv_wr32(priv, 0x100810, 0x00000000);
79 mutex_unlock(&nv_subdev(priv)->mutex);
80}
81
82/*******************************************************************************
83 * VMMGR subdev
84 ******************************************************************************/
85
86static int
87nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
88 struct nouveau_oclass *oclass, void *data, u32 size,
89 struct nouveau_object **pobject)
90{
91 struct nouveau_device *device = nv_device(parent);
92 struct nv04_vmmgr_priv *priv;
93 int ret;
94
95 if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
96 return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
97 data, size, pobject);
98 }
99
100 ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
101 "pciegart", &priv);
102 *pobject = nv_object(priv);
103 if (ret)
104 return ret;
105
106 priv->base.create = nv04_vm_create;
107 priv->base.limit = NV41_GART_SIZE;
108 priv->base.dma_bits = 39;
109 priv->base.pgt_bits = 32 - 12;
110 priv->base.spg_shift = 12;
111 priv->base.lpg_shift = 12;
112 priv->base.map_sg = nv41_vm_map_sg;
113 priv->base.unmap = nv41_vm_unmap;
114 priv->base.flush = nv41_vm_flush;
115
116 ret = nouveau_vm_create(&priv->base, 0, NV41_GART_SIZE, 0, 4096,
117 &priv->vm);
118 if (ret)
119 return ret;
120
121 ret = nouveau_gpuobj_new(parent, NULL,
122 (NV41_GART_SIZE / NV41_GART_PAGE) * 4,
123 16, NVOBJ_FLAG_ZERO_ALLOC,
124 &priv->vm->pgt[0].obj[0]);
125 priv->vm->pgt[0].refcount[0] = 1;
126 if (ret)
127 return ret;
128
129 return 0;
130}
131
132static int
133nv41_vmmgr_init(struct nouveau_object *object)
134{
135 struct nv04_vmmgr_priv *priv = (void *)object;
136 struct nouveau_gpuobj *dma = priv->vm->pgt[0].obj[0];
137 int ret;
138
139 ret = nouveau_vmmgr_init(&priv->base);
140 if (ret)
141 return ret;
142
143 nv_wr32(priv, 0x100800, dma->addr | 0x00000002);
144 nv_mask(priv, 0x10008c, 0x00000100, 0x00000100);
145 nv_wr32(priv, 0x100820, 0x00000000);
146 return 0;
147}
148
149struct nouveau_oclass
150nv41_vmmgr_oclass = {
151 .handle = NV_SUBDEV(VM, 0x41),
152 .ofuncs = &(struct nouveau_ofuncs) {
153 .ctor = nv41_vmmgr_ctor,
154 .dtor = nv04_vmmgr_dtor,
155 .init = nv41_vmmgr_init,
156 .fini = _nouveau_vmmgr_fini,
157 },
158};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
new file mode 100644
index 000000000000..0ac18d05a146
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
@@ -0,0 +1,248 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/gpuobj.h>
26#include <core/option.h>
27
28#include <subdev/timer.h>
29#include <subdev/vm.h>
30
31#include "nv04.h"
32
33#define NV44_GART_SIZE (512 * 1024 * 1024)
34#define NV44_GART_PAGE ( 4 * 1024)
35
36/*******************************************************************************
37 * VM map/unmap callbacks
38 ******************************************************************************/
39
40static void
41nv44_vm_fill(struct nouveau_gpuobj *pgt, dma_addr_t null,
42 dma_addr_t *list, u32 pte, u32 cnt)
43{
44 u32 base = (pte << 2) & ~0x0000000f;
45 u32 tmp[4];
46
47 tmp[0] = nv_ro32(pgt, base + 0x0);
48 tmp[1] = nv_ro32(pgt, base + 0x4);
49 tmp[2] = nv_ro32(pgt, base + 0x8);
50 tmp[3] = nv_ro32(pgt, base + 0xc);
51
52 while (cnt--) {
53 u32 addr = list ? (*list++ >> 12) : (null >> 12);
54 switch (pte++ & 0x3) {
55 case 0:
56 tmp[0] &= ~0x07ffffff;
57 tmp[0] |= addr;
58 break;
59 case 1:
60 tmp[0] &= ~0xf8000000;
61 tmp[0] |= addr << 27;
62 tmp[1] &= ~0x003fffff;
63 tmp[1] |= addr >> 5;
64 break;
65 case 2:
66 tmp[1] &= ~0xffc00000;
67 tmp[1] |= addr << 22;
68 tmp[2] &= ~0x0001ffff;
69 tmp[2] |= addr >> 10;
70 break;
71 case 3:
72 tmp[2] &= ~0xfffe0000;
73 tmp[2] |= addr << 17;
74 tmp[3] &= ~0x00000fff;
75 tmp[3] |= addr >> 15;
76 break;
77 }
78 }
79
80 nv_wo32(pgt, base + 0x0, tmp[0]);
81 nv_wo32(pgt, base + 0x4, tmp[1]);
82 nv_wo32(pgt, base + 0x8, tmp[2]);
83 nv_wo32(pgt, base + 0xc, tmp[3] | 0x40000000);
84}
85
86static void
87nv44_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
88 struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
89{
90 struct nv04_vmmgr_priv *priv = (void *)vma->vm->vmm;
91 u32 tmp[4];
92 int i;
93
94 if (pte & 3) {
95 u32 max = 4 - (pte & 3);
96 u32 part = (cnt > max) ? max : cnt;
97 nv44_vm_fill(pgt, priv->null, list, pte, part);
98 pte += part;
99 list += part;
100 cnt -= part;
101 }
102
103 while (cnt >= 4) {
104 for (i = 0; i < 4; i++)
105 tmp[i] = *list++ >> 12;
106 nv_wo32(pgt, pte++ * 4, tmp[0] >> 0 | tmp[1] << 27);
107 nv_wo32(pgt, pte++ * 4, tmp[1] >> 5 | tmp[2] << 22);
108 nv_wo32(pgt, pte++ * 4, tmp[2] >> 10 | tmp[3] << 17);
109 nv_wo32(pgt, pte++ * 4, tmp[3] >> 15 | 0x40000000);
110 cnt -= 4;
111 }
112
113 if (cnt)
114 nv44_vm_fill(pgt, priv->null, list, pte, cnt);
115}
116
117static void
118nv44_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
119{
120 struct nv04_vmmgr_priv *priv = (void *)nouveau_vmmgr(pgt);
121
122 if (pte & 3) {
123 u32 max = 4 - (pte & 3);
124 u32 part = (cnt > max) ? max : cnt;
125 nv44_vm_fill(pgt, priv->null, NULL, pte, part);
126 pte += part;
127 cnt -= part;
128 }
129
130 while (cnt >= 4) {
131 nv_wo32(pgt, pte++ * 4, 0x00000000);
132 nv_wo32(pgt, pte++ * 4, 0x00000000);
133 nv_wo32(pgt, pte++ * 4, 0x00000000);
134 nv_wo32(pgt, pte++ * 4, 0x00000000);
135 cnt -= 4;
136 }
137
138 if (cnt)
139 nv44_vm_fill(pgt, priv->null, NULL, pte, cnt);
140}
141
142static void
143nv44_vm_flush(struct nouveau_vm *vm)
144{
145 struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
146 nv_wr32(priv, 0x100814, priv->base.limit - NV44_GART_PAGE);
147 nv_wr32(priv, 0x100808, 0x00000020);
148 if (!nv_wait(priv, 0x100808, 0x00000001, 0x00000001))
149 nv_error(priv, "timeout: 0x%08x\n", nv_rd32(priv, 0x100808));
150 nv_wr32(priv, 0x100808, 0x00000000);
151}
152
153/*******************************************************************************
154 * VMMGR subdev
155 ******************************************************************************/
156
157static int
158nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
159 struct nouveau_oclass *oclass, void *data, u32 size,
160 struct nouveau_object **pobject)
161{
162 struct nouveau_device *device = nv_device(parent);
163 struct nv04_vmmgr_priv *priv;
164 int ret;
165
166 if (!nouveau_boolopt(device->cfgopt, "NvPCIE", true)) {
167 return nouveau_object_ctor(parent, engine, &nv04_vmmgr_oclass,
168 data, size, pobject);
169 }
170
171 ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIEGART",
172 "pciegart", &priv);
173 *pobject = nv_object(priv);
174 if (ret)
175 return ret;
176
177 priv->base.create = nv04_vm_create;
178 priv->base.limit = NV44_GART_SIZE;
179 priv->base.dma_bits = 39;
180 priv->base.pgt_bits = 32 - 12;
181 priv->base.spg_shift = 12;
182 priv->base.lpg_shift = 12;
183 priv->base.map_sg = nv44_vm_map_sg;
184 priv->base.unmap = nv44_vm_unmap;
185 priv->base.flush = nv44_vm_flush;
186
187 priv->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &priv->null);
188 if (!priv->nullp) {
189 nv_error(priv, "unable to allocate dummy pages\n");
190 return -ENOMEM;
191 }
192
193 ret = nouveau_vm_create(&priv->base, 0, NV44_GART_SIZE, 0, 4096,
194 &priv->vm);
195 if (ret)
196 return ret;
197
198 ret = nouveau_gpuobj_new(parent, NULL,
199 (NV44_GART_SIZE / NV44_GART_PAGE) * 4,
200 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
201 &priv->vm->pgt[0].obj[0]);
202 priv->vm->pgt[0].refcount[0] = 1;
203 if (ret)
204 return ret;
205
206 return 0;
207}
208
209static int
210nv44_vmmgr_init(struct nouveau_object *object)
211{
212 struct nv04_vmmgr_priv *priv = (void *)object;
213 struct nouveau_gpuobj *gart = priv->vm->pgt[0].obj[0];
214 u32 addr;
215 int ret;
216
217 ret = nouveau_vmmgr_init(&priv->base);
218 if (ret)
219 return ret;
220
221 /* calculate the vram address of this PRAMIN block; the object must be
222 * allocated with 512KiB alignment, and must not exceed 512KiB in total
223 * size, for this to work correctly
224 */
225 addr = nv_rd32(priv, 0x10020c);
226 addr -= ((gart->addr >> 19) + 1) << 19;
227
228 nv_wr32(priv, 0x100850, 0x80000000);
229 nv_wr32(priv, 0x100818, priv->null);
230 nv_wr32(priv, 0x100804, NV44_GART_SIZE);
231 nv_wr32(priv, 0x100850, 0x00008000);
232 nv_mask(priv, 0x10008c, 0x00000200, 0x00000200);
233 nv_wr32(priv, 0x100820, 0x00000000);
234 nv_wr32(priv, 0x10082c, 0x00000001);
235 nv_wr32(priv, 0x100800, addr | 0x00000010);
236 return 0;
237}
238
239struct nouveau_oclass
240nv44_vmmgr_oclass = {
241 .handle = NV_SUBDEV(VM, 0x44),
242 .ofuncs = &(struct nouveau_ofuncs) {
243 .ctor = nv44_vmmgr_ctor,
244 .dtor = nv04_vmmgr_dtor,
245 .init = nv44_vmmgr_init,
246 .fini = _nouveau_vmmgr_fini,
247 },
248};
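
nv44_vm_fill() above copes with the unusual NV44 PTE format: four 27-bit page numbers are packed into a 16-byte group of four 32-bit words, so any update that does not cover a whole group must read-modify-write all four words. The pure packing step, extracted for clarity (plain C; the shift and mask values are taken from the switch above):

    #include <stdint.h>

    /* addr[0] -> w0[26:0]; addr[1] -> w0[31:27] + w1[21:0];
     * addr[2] -> w1[31:22] + w2[16:0]; addr[3] -> w2[31:17] + w3[11:0] */
    static void nv44_pack(const uint32_t addr[4], uint32_t w[4])
    {
        w[0] = (addr[0] & 0x07ffffff) | (addr[1] << 27);
        w[1] = (addr[1] >> 5) | (addr[2] << 22);
        w[2] = (addr[2] >> 10) | (addr[3] << 17);
        w[3] = (addr[3] >> 15) | 0x40000000;    /* valid bit */
    }

This matches the aligned fast path in nv44_vm_map_sg(), which writes the same four words directly whenever pte is group-aligned and at least four entries remain.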
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
index c9fdfb48270b..e067f81c97b3 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
@@ -22,12 +22,19 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
+#include <core/device.h>
+#include <core/gpuobj.h>
 
-#include "nouveau_drv.h"
-#include "nouveau_vm.h"
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/vm.h>
 
-void
+struct nv50_vmmgr_priv {
+	struct nouveau_vmmgr base;
+	spinlock_t lock;
+};
+
+static void
 nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
 		struct nouveau_gpuobj *pgt[2])
 {
@@ -35,11 +42,11 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
 	u32 coverage = 0;
 
 	if (pgt[0]) {
-		phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
+		phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */
 		coverage = (pgt[0]->size >> 3) << 12;
 	} else
 	if (pgt[1]) {
-		phys = 0x00000001 | pgt[1]->vinst; /* present */
+		phys = 0x00000001 | pgt[1]->addr; /* present */
 		coverage = (pgt[1]->size >> 3) << 16;
 	}
 
@@ -69,19 +76,18 @@ vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
 	return phys;
 }
 
-void
+static void
 nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
-	struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
 	u32 comp = (mem->memtype & 0x180) >> 7;
 	u32 block, target;
 	int i;
 
 	/* IGPs don't have real VRAM, re-target to stolen system memory */
 	target = 0;
-	if (dev_priv->vram_sys_base) {
-		phys += dev_priv->vram_sys_base;
+	if (nouveau_fb(vma->vm->vmm)->ram.stolen) {
+		phys += nouveau_fb(vma->vm->vmm)->ram.stolen;
 		target = 3;
 	}
 
@@ -103,7 +109,7 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 			phys += block << (vma->node->type - 3);
 			cnt -= block;
 			if (comp) {
-				u32 tag = mem->tag->start + ((delta >> 16) * comp);
+				u32 tag = mem->tag->offset + ((delta >> 16) * comp);
 				offset_h |= (tag << 17);
 				delta += block << (vma->node->type - 3);
 			}
@@ -117,7 +123,7 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	}
 }
 
-void
+static void
 nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
@@ -131,7 +137,7 @@ nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	}
 }
 
-void
+static void
 nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
 {
 	pte <<= 3;
@@ -142,36 +148,80 @@ nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
 	}
 }
 
-void
+static void
 nv50_vm_flush(struct nouveau_vm *vm)
 {
-	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
-	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct nouveau_engine *engine;
 	int i;
 
-	pinstmem->flush(vm->dev);
-
-	/* BAR */
-	if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
-		nv50_vm_flush_engine(vm->dev, 6);
-		return;
-	}
-
-	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
-		if (atomic_read(&vm->engref[i]))
-			dev_priv->eng[i]->tlb_flush(vm->dev, i);
+	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
+		if (atomic_read(&vm->engref[i])) {
+			engine = nouveau_engine(vm->vmm, i);
+			if (engine && engine->tlb_flush)
+				engine->tlb_flush(engine);
+		}
 	}
 }
 
 void
-nv50_vm_flush_engine(struct drm_device *dev, int engine)
+nv50_vm_flush_engine(struct nouveau_subdev *subdev, int engine)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
 	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->vm_lock, flags);
-	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
-	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
-		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
-	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
+	spin_lock_irqsave(&priv->lock, flags);
+	nv_wr32(subdev, 0x100c80, (engine << 16) | 1);
+	if (!nv_wait(subdev, 0x100c80, 0x00000001, 0x00000000))
+		nv_error(subdev, "vm flush timeout: engine %d\n", engine);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int
+nv50_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
+	       u64 mm_offset, struct nouveau_vm **pvm)
+{
+	u32 block = (1 << (vmm->pgt_bits + 12));
+	if (block > length)
+		block = length;
+
+	return nouveau_vm_create(vmm, offset, length, mm_offset, block, pvm);
 }
+
+static int
+nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv50_vmmgr_priv *priv;
+	int ret;
+
+	ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.limit = 1ULL << 40;
+	priv->base.dma_bits = 40;
+	priv->base.pgt_bits = 29 - 12;
+	priv->base.spg_shift = 12;
+	priv->base.lpg_shift = 16;
+	priv->base.create = nv50_vm_create;
+	priv->base.map_pgt = nv50_vm_map_pgt;
+	priv->base.map = nv50_vm_map;
+	priv->base.map_sg = nv50_vm_map_sg;
+	priv->base.unmap = nv50_vm_unmap;
+	priv->base.flush = nv50_vm_flush;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_vmmgr_oclass = {
+	.handle = NV_SUBDEV(VM, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_vmmgr_ctor,
+		.dtor = _nouveau_vmmgr_dtor,
+		.init = _nouveau_vmmgr_init,
+		.fini = _nouveau_vmmgr_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
index fad338314881..30c61e6c2017 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
@@ -22,21 +22,28 @@
  * Authors: Ben Skeggs
 */
 
-#include <drm/drmP.h>
+#include <core/device.h>
+#include <core/gpuobj.h>
 
-#include "nouveau_drv.h"
-#include "nouveau_vm.h"
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+#include <subdev/vm.h>
 
-void
+struct nvc0_vmmgr_priv {
+	struct nouveau_vmmgr base;
+	spinlock_t lock;
+};
+
+static void
 nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
 		struct nouveau_gpuobj *pgt[2])
 {
 	u32 pde[2] = { 0, 0 };
 
 	if (pgt[0])
-		pde[1] = 0x00000001 | (pgt[0]->vinst >> 8);
+		pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
 	if (pgt[1])
-		pde[0] = 0x00000001 | (pgt[1]->vinst >> 8);
+		pde[0] = 0x00000001 | (pgt[1]->addr >> 8);
 
 	nv_wo32(pgd, (index * 8) + 0, pde[0]);
 	nv_wo32(pgd, (index * 8) + 4, pde[1]);
@@ -57,7 +64,7 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
 	return phys;
 }
 
-void
+static void
 nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
 {
@@ -73,7 +80,7 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	}
 }
 
-void
+static void
 nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
 {
@@ -88,7 +95,7 @@ nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
 	}
 }
 
-void
+static void
 nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
 {
 	pte <<= 3;
@@ -100,37 +107,83 @@ nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
 }
 
 void
-nvc0_vm_flush(struct nouveau_vm *vm)
+nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
 {
-	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
-	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
-	struct drm_device *dev = vm->dev;
-	struct nouveau_vm_pgd *vpgd;
+	struct nvc0_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
 	unsigned long flags;
-	u32 engine;
 
-	engine = 1;
-	if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm)
-		engine |= 4;
+	/* looks like maybe a "free flush slots" counter, the
+	 * faster you write to 0x100cbc the more it decreases
+	 */
+	spin_lock_irqsave(&priv->lock, flags);
+	if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) {
+		nv_error(subdev, "vm timeout 0: 0x%08x %d\n",
+			 nv_rd32(subdev, 0x100c80), type);
+	}
+
+	nv_wr32(subdev, 0x100cb8, addr >> 8);
+	nv_wr32(subdev, 0x100cbc, 0x80000000 | type);
+
+	/* wait for flush to be queued? */
+	if (!nv_wait(subdev, 0x100c80, 0x00008000, 0x00008000)) {
+		nv_error(subdev, "vm timeout 1: 0x%08x %d\n",
			 nv_rd32(subdev, 0x100c80), type);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
 
-	pinstmem->flush(vm->dev);
+static void
+nvc0_vm_flush(struct nouveau_vm *vm)
+{
+	struct nouveau_vm_pgd *vpgd;
 
-	spin_lock_irqsave(&dev_priv->vm_lock, flags);
 	list_for_each_entry(vpgd, &vm->pgd_list, head) {
-		/* looks like maybe a "free flush slots" counter, the
-		 * faster you write to 0x100cbc to more it decreases
-		 */
-		if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
-			NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
-				 nv_rd32(dev, 0x100c80), engine);
-		}
-		nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
-		nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
-		/* wait for flush to be queued? */
-		if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
-			NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
-				 nv_rd32(dev, 0x100c80), engine);
-		}
+		nvc0_vm_flush_engine(nv_subdev(vm->vmm), vpgd->obj->addr, 1);
 	}
-	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
 }
+
+static int
+nvc0_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
+	       u64 mm_offset, struct nouveau_vm **pvm)
+{
+	return nouveau_vm_create(vmm, offset, length, mm_offset, 4096, pvm);
+}
+
+static int
+nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nvc0_vmmgr_priv *priv;
+	int ret;
+
+	ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.limit = 1ULL << 40;
+	priv->base.dma_bits = 40;
+	priv->base.pgt_bits = 27 - 12;
+	priv->base.spg_shift = 12;
+	priv->base.lpg_shift = 17;
+	priv->base.create = nvc0_vm_create;
+	priv->base.map_pgt = nvc0_vm_map_pgt;
+	priv->base.map = nvc0_vm_map;
+	priv->base.map_sg = nvc0_vm_map_sg;
+	priv->base.unmap = nvc0_vm_unmap;
+	priv->base.flush = nvc0_vm_flush;
+	spin_lock_init(&priv->lock);
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_vmmgr_oclass = {
+	.handle = NV_SUBDEV(VM, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_vmmgr_ctor,
+		.dtor = _nouveau_vmmgr_dtor,
+		.init = _nouveau_vmmgr_init,
+		.fini = _nouveau_vmmgr_fini,
+	},
+};
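
The new nvc0_vm_flush_engine() above factors the Fermi TLB-flush handshake out of the per-PGD loop: under the spinlock, wait for 0x100c80 to report free flush slots, post the page-directory address through 0x100cb8 and the flush type through 0x100cbc, then wait for the queued bit. The request/acknowledge shape in isolation (plain C; rd/wr stand in for nv_rd32()/nv_wr32(), and unlike the driver this sketch spins without a timeout):

    #include <stdint.h>

    static void post_flush(uint32_t (*rd)(uint32_t),
                           void (*wr)(uint32_t, uint32_t),
                           uint64_t pgd_addr, uint32_t type)
    {
        while ((rd(0x100c80) & 0x00ff0000) == 0)
            ;                       /* no free flush slots */
        wr(0x100cb8, (uint32_t)(pgd_addr >> 8));
        wr(0x100cbc, 0x80000000 | type);
        while ((rd(0x100c80) & 0x00008000) == 0)
            ;                       /* wait until the request is queued */
    }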
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 111d9eba7065..cc79c796afee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -21,23 +21,153 @@
  *
  */
 
-#include <drm/drmP.h>
+#include <core/object.h>
+#include <core/client.h>
+#include <core/device.h>
+#include <core/class.h>
+#include <core/mm.h>
 
-#include "nouveau_drv.h"
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <subdev/instmem.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
+#include "nouveau_gem.h"
+#include "nouveau_chan.h"
 #include "nouveau_abi16.h"
-#include "nouveau_ramht.h"
-#include "nouveau_software.h"
+
+struct nouveau_abi16 *
+nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
+{
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	mutex_lock(&cli->mutex);
+	if (!cli->abi16) {
+		struct nouveau_abi16 *abi16;
+		cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
+		if (cli->abi16) {
+			INIT_LIST_HEAD(&abi16->channels);
+			abi16->client = nv_object(cli);
+
+			/* allocate device object targeting client's default
+			 * device (ie. the one that belongs to the fd it
+			 * opened)
+			 */
+			if (nouveau_object_new(abi16->client, NVDRM_CLIENT,
+					       NVDRM_DEVICE, 0x0080,
+					       &(struct nv_device_class) {
+						.device = ~0ULL,
+					       },
+					       sizeof(struct nv_device_class),
+					       &abi16->device) == 0)
+				return cli->abi16;
+
+			kfree(cli->abi16);
+			cli->abi16 = NULL;
+		}
+
+		mutex_unlock(&cli->mutex);
+	}
+	return cli->abi16;
+}
+
+int
+nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
+{
+	struct nouveau_cli *cli = (void *)abi16->client;
+	mutex_unlock(&cli->mutex);
+	return ret;
+}
+
+u16
+nouveau_abi16_swclass(struct nouveau_drm *drm)
+{
+	switch (nv_device(drm->device)->card_type) {
+	case NV_04:
+		return 0x006e;
+	case NV_10:
+	case NV_20:
+	case NV_30:
+	case NV_40:
+		return 0x016e;
+	case NV_50:
+		return 0x506e;
+	case NV_C0:
+	case NV_D0:
+	case NV_E0:
+		return 0x906e;
+	}
+
+	return 0x0000;
+}
+
+static void
+nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
+			struct nouveau_abi16_ntfy *ntfy)
+{
+	nouveau_mm_free(&chan->heap, &ntfy->node);
+	list_del(&ntfy->head);
+	kfree(ntfy);
+}
+
+static void
+nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
+			struct nouveau_abi16_chan *chan)
+{
+	struct nouveau_abi16_ntfy *ntfy, *temp;
+
+	/* cleanup notifier state */
+	list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
+		nouveau_abi16_ntfy_fini(chan, ntfy);
+	}
+
+	if (chan->ntfy) {
+		nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
+		drm_gem_object_unreference_unlocked(chan->ntfy->gem);
+	}
+
+	if (chan->heap.block_size)
+		nouveau_mm_fini(&chan->heap);
+
+	/* destroy channel object, all children will be killed too */
+	if (chan->chan) {
+		abi16->handles &= ~(1 << (chan->chan->handle & 0xffff));
+		nouveau_channel_del(&chan->chan);
+	}
+
+	list_del(&chan->head);
+	kfree(chan);
+}
+
+void
+nouveau_abi16_fini(struct nouveau_abi16 *abi16)
+{
+	struct nouveau_cli *cli = (void *)abi16->client;
+	struct nouveau_abi16_chan *chan, *temp;
+
+	/* cleanup channels */
+	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
+		nouveau_abi16_chan_fini(abi16, chan);
+	}
+
+	/* destroy the device object */
+	nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE);
+
+	kfree(cli->abi16);
+	cli->abi16 = NULL;
+}
 
 int
 nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_timer *ptimer = nouveau_timer(device);
 	struct drm_nouveau_getparam *getparam = data;
 
 	switch (getparam->param) {
 	case NOUVEAU_GETPARAM_CHIPSET_ID:
-		getparam->value = dev_priv->chipset;
+		getparam->value = device->chipset;
 		break;
 	case NOUVEAU_GETPARAM_PCI_VENDOR:
 		getparam->value = dev->pci_vendor;
@@ -55,16 +185,16 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		getparam->value = 2;
 		break;
 	case NOUVEAU_GETPARAM_FB_SIZE:
-		getparam->value = dev_priv->fb_available_size;
+		getparam->value = drm->gem.vram_available;
 		break;
 	case NOUVEAU_GETPARAM_AGP_SIZE:
-		getparam->value = dev_priv->gart_info.aper_size;
+		getparam->value = drm->gem.gart_available;
 		break;
 	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
 		getparam->value = 0; /* deprecated */
 		break;
 	case NOUVEAU_GETPARAM_PTIMER_TIME:
-		getparam->value = dev_priv->engine.timer.read(dev);
+		getparam->value = ptimer->read(ptimer);
 		break;
 	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
 		getparam->value = 1;
@@ -76,13 +206,13 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
76 /* NV40 and NV50 versions are quite different, but register 206 /* NV40 and NV50 versions are quite different, but register
77 * address is the same. User is supposed to know the card 207 * address is the same. User is supposed to know the card
78 * family anyway... */ 208 * family anyway... */
79 if (dev_priv->chipset >= 0x40) { 209 if (device->chipset >= 0x40) {
80 getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS); 210 getparam->value = nv_rd32(device, 0x001540);
81 break; 211 break;
82 } 212 }
83 /* FALLTHRU */ 213 /* FALLTHRU */
84 default: 214 default:
85 NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param); 215 nv_debug(device, "unknown parameter %lld\n", getparam->param);
86 return -EINVAL; 216 return -EINVAL;
87 } 217 }
88 218
@@ -98,148 +228,252 @@ nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
98int 228int
99nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) 229nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
100{ 230{
101 struct drm_nouveau_private *dev_priv = dev->dev_private;
102 struct drm_nouveau_channel_alloc *init = data; 231 struct drm_nouveau_channel_alloc *init = data;
103 struct nouveau_channel *chan; 232 struct nouveau_cli *cli = nouveau_cli(file_priv);
233 struct nouveau_drm *drm = nouveau_drm(dev);
234 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
235 struct nouveau_abi16_chan *chan;
236 struct nouveau_client *client;
237 struct nouveau_device *device;
238 struct nouveau_instmem *imem;
239 struct nouveau_fb *pfb;
104 int ret; 240 int ret;
105 241
106 if (!dev_priv->eng[NVOBJ_ENGINE_GR]) 242 if (unlikely(!abi16))
107 return -ENODEV; 243 return -ENOMEM;
244 client = nv_client(abi16->client);
108 245
109 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) 246 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
110 return -EINVAL; 247 return nouveau_abi16_put(abi16, -EINVAL);
248
249 device = nv_device(abi16->device);
250 imem = nouveau_instmem(device);
251 pfb = nouveau_fb(device);
252
253 /* allocate "abi16 channel" data and make up a handle for it */
254 init->channel = ffsll(~abi16->handles);
255 if (!init->channel--)
256 return nouveau_abi16_put(abi16, -ENOSPC);
257
258 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
259 if (!chan)
260 return nouveau_abi16_put(abi16, -ENOMEM);
261
262 INIT_LIST_HEAD(&chan->notifiers);
263 list_add(&chan->head, &abi16->channels);
264 abi16->handles |= (1 << init->channel);
265
266 /* create channel object and initialise dma and fence management */
267 if (device->card_type >= NV_E0) {
268 init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
269 init->tt_ctxdma_handle = 0;
270 }
111 271
112 ret = nouveau_channel_alloc(dev, &chan, file_priv, 272 ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
113 init->fb_ctxdma_handle, 273 init->channel, init->fb_ctxdma_handle,
114 init->tt_ctxdma_handle); 274 init->tt_ctxdma_handle, &chan->chan);
115 if (ret) 275 if (ret)
116 return ret; 276 goto done;
117 init->channel = chan->id; 277
118 278 if (device->card_type >= NV_50)
119 if (nouveau_vram_pushbuf == 0) { 279 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
120 if (chan->dma.ib_max) 280 NOUVEAU_GEM_DOMAIN_GART;
121 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | 281 else
122 NOUVEAU_GEM_DOMAIN_GART; 282 if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
123 else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
124 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
125 else
126 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
127 } else {
128 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; 283 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
129 } 284 else
285 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
130 286
131 if (dev_priv->card_type < NV_C0) { 287 if (device->card_type < NV_C0) {
132 init->subchan[0].handle = 0x00000000; 288 init->subchan[0].handle = 0x00000000;
133 init->subchan[0].grclass = 0x0000; 289 init->subchan[0].grclass = 0x0000;
134 init->subchan[1].handle = NvSw; 290 init->subchan[1].handle = NvSw;
135 init->subchan[1].grclass = NV_SW; 291 init->subchan[1].grclass = 0x506e;
136 init->nr_subchan = 2; 292 init->nr_subchan = 2;
137 } 293 }
138 294
139 /* Named memory object area */ 295 /* Named memory object area */
140 ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, 296 ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
297 0, 0, &chan->ntfy);
298 if (ret == 0)
299 ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT);
300 if (ret)
301 goto done;
302
303 if (device->card_type >= NV_50) {
304 ret = nouveau_bo_vma_add(chan->ntfy, client->vm,
305 &chan->ntfy_vma);
306 if (ret)
307 goto done;
308 }
309
310 ret = drm_gem_handle_create(file_priv, chan->ntfy->gem,
141 &init->notifier_handle); 311 &init->notifier_handle);
312 if (ret)
313 goto done;
142 314
143 if (ret == 0) 315 ret = nouveau_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
144 atomic_inc(&chan->users); /* userspace reference */ 316done:
145 nouveau_channel_put(&chan); 317 if (ret)
146 return ret; 318 nouveau_abi16_chan_fini(abi16, chan);
319 return nouveau_abi16_put(abi16, ret);
147} 320}
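Channel ids are carved out of abi16->handles, a 64-bit bitmap: ffsll() on the complement yields the lowest clear bit (1-based, 0 when all 64 slots are taken), the post-decrement converts it to a 0-based id, and chan_fini() clears the bit again. A self-contained sketch of that allocator, using the compiler builtin in place of ffsll():

#include <stdio.h>
#include <stdint.h>

static int handle_alloc(uint64_t *bitmap)
{
	/* __builtin_ffsll() mirrors ffsll() above: 1-based index of
	 * the lowest set bit, 0 if none are set */
	int slot = __builtin_ffsll(~*bitmap);
	if (!slot--)
		return -1;              /* -ENOSPC in the driver */
	*bitmap |= 1ULL << slot;
	return slot;
}

static void handle_free(uint64_t *bitmap, int slot)
{
	*bitmap &= ~(1ULL << slot);
}

int main(void)
{
	uint64_t map = 0;
	int a = handle_alloc(&map);
	int b = handle_alloc(&map);
	printf("%d %d\n", a, b);        /* 0 1 */
	handle_free(&map, 0);
	printf("%d\n", handle_alloc(&map)); /* 0: freed slot is reused */
	return 0;
}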
148 321
322
149int 323int
150nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS) 324nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
151{ 325{
152 struct drm_nouveau_channel_free *req = data; 326 struct drm_nouveau_channel_free *req = data;
153 struct nouveau_channel *chan; 327 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
328 struct nouveau_abi16_chan *chan;
329 int ret = -ENOENT;
154 330
155 chan = nouveau_channel_get(file_priv, req->channel); 331 if (unlikely(!abi16))
156 if (IS_ERR(chan)) 332 return -ENOMEM;
157 return PTR_ERR(chan);
158 333
159 list_del(&chan->list); 334 list_for_each_entry(chan, &abi16->channels, head) {
160 atomic_dec(&chan->users); 335 if (chan->chan->handle == (NVDRM_CHAN | req->channel)) {
161 nouveau_channel_put(&chan); 336 nouveau_abi16_chan_fini(abi16, chan);
162 return 0; 337 return nouveau_abi16_put(abi16, 0);
338 }
339 }
340
341 return nouveau_abi16_put(abi16, ret);
163} 342}
164 343
165int 344int
166nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) 345nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
167{ 346{
168 struct drm_nouveau_grobj_alloc *init = data; 347 struct drm_nouveau_grobj_alloc *init = data;
169 struct nouveau_channel *chan; 348 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
349 struct nouveau_drm *drm = nouveau_drm(dev);
350 struct nouveau_object *object;
170 int ret; 351 int ret;
171 352
353 if (unlikely(!abi16))
354 return -ENOMEM;
355
172 if (init->handle == ~0) 356 if (init->handle == ~0)
173 return -EINVAL; 357 return nouveau_abi16_put(abi16, -EINVAL);
174 358
175 /* compatibility with userspace that assumes 506e for all chipsets */ 359 /* compatibility with userspace that assumes 506e for all chipsets */
176 if (init->class == 0x506e) { 360 if (init->class == 0x506e) {
177 init->class = nouveau_software_class(dev); 361 init->class = nouveau_abi16_swclass(drm);
178 if (init->class == 0x906e) 362 if (init->class == 0x906e)
179 return 0; 363 return nouveau_abi16_put(abi16, 0);
180 } else
181 if (init->class == 0x906e) {
182 NV_DEBUG(dev, "906e not supported yet\n");
183 return -EINVAL;
184 }
185
186 chan = nouveau_channel_get(file_priv, init->channel);
187 if (IS_ERR(chan))
188 return PTR_ERR(chan);
189
190 if (nouveau_ramht_find(chan, init->handle)) {
191 ret = -EEXIST;
192 goto out;
193 }
194
195 ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
196 if (ret) {
197 NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
198 ret, init->channel, init->handle);
199 } 364 }
200 365
201out: 366 ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel,
202 nouveau_channel_put(&chan); 367 init->handle, init->class, NULL, 0, &object);
203 return ret; 368 return nouveau_abi16_put(abi16, ret);
204} 369}
205 370
206int 371int
207nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) 372nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
208{ 373{
209 struct drm_nouveau_private *dev_priv = dev->dev_private; 374 struct drm_nouveau_notifierobj_alloc *info = data;
210 struct drm_nouveau_notifierobj_alloc *na = data; 375 struct nouveau_drm *drm = nouveau_drm(dev);
211 struct nouveau_channel *chan; 376 struct nouveau_device *device = nv_device(drm->device);
377 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
378 struct nouveau_abi16_chan *chan, *temp;
379 struct nouveau_abi16_ntfy *ntfy;
380 struct nouveau_object *object;
381 struct nv_dma_class args;
212 int ret; 382 int ret;
213 383
384 if (unlikely(!abi16))
385 return -ENOMEM;
386
214 /* completely unnecessary for these chipsets... */ 387 /* completely unnecessary for these chipsets... */
215 if (unlikely(dev_priv->card_type >= NV_C0)) 388 if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
216 return -EINVAL; 389 return nouveau_abi16_put(abi16, -EINVAL);
217 390
218 chan = nouveau_channel_get(file_priv, na->channel); 391 list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
219 if (IS_ERR(chan)) 392 if (chan->chan->handle == (NVDRM_CHAN | info->channel))
220 return PTR_ERR(chan); 393 break;
394 chan = NULL;
395 }
221 396
222 ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, 397 if (!chan)
223 &na->offset); 398 return nouveau_abi16_put(abi16, -ENOENT);
224 nouveau_channel_put(&chan); 399
225 return ret; 400 ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
401 if (!ntfy)
402 return nouveau_abi16_put(abi16, -ENOMEM);
403
404 list_add(&ntfy->head, &chan->notifiers);
405 ntfy->handle = info->handle;
406
407 ret = nouveau_mm_head(&chan->heap, 1, info->size, info->size, 1,
408 &ntfy->node);
409 if (ret)
410 goto done;
411
412 args.start = ntfy->node->offset;
413 args.limit = ntfy->node->offset + ntfy->node->length - 1;
414 if (device->card_type >= NV_50) {
415 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
416 args.start += chan->ntfy_vma.offset;
417 args.limit += chan->ntfy_vma.offset;
418 } else
419 if (drm->agp.stat == ENABLED) {
420 args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
421 args.start += drm->agp.base + chan->ntfy->bo.offset;
422 args.limit += drm->agp.base + chan->ntfy->bo.offset;
423 } else {
424 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
425 args.start += chan->ntfy->bo.offset;
426 args.limit += chan->ntfy->bo.offset;
427 }
428
429 ret = nouveau_object_new(abi16->client, chan->chan->handle,
430 ntfy->handle, 0x003d, &args,
431 sizeof(args), &object);
432 if (ret)
433 goto done;
434
435done:
436 if (ret)
437 nouveau_abi16_ntfy_fini(chan, ntfy);
438 return nouveau_abi16_put(abi16, ret);
226} 439}
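The DMA object window above is built from the heap node's offset and an inclusive limit (offset + length - 1), then both ends are biased by whichever base the target needs: the notifier's VM offset on NV50+, the AGP aperture base plus the BO offset when AGP is enabled, or the bare BO offset for PCI. A small sketch of the arithmetic, with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t node_offset = 0x100;   /* from the per-channel heap */
	uint64_t node_length = 0x20;    /* info->size */
	uint64_t base        = 0x40000; /* e.g. chan->ntfy_vma.offset */

	uint64_t start = base + node_offset;
	uint64_t limit = base + node_offset + node_length - 1; /* inclusive */

	printf("start=0x%llx limit=0x%llx\n",
	       (unsigned long long)start, (unsigned long long)limit);
	return 0;
}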
227 440
228int 441int
229nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) 442nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
230{ 443{
231 struct drm_nouveau_gpuobj_free *objfree = data; 444 struct drm_nouveau_gpuobj_free *fini = data;
232 struct nouveau_channel *chan; 445 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
446 struct nouveau_abi16_chan *chan, *temp;
447 struct nouveau_abi16_ntfy *ntfy;
233 int ret; 448 int ret;
234 449
235 chan = nouveau_channel_get(file_priv, objfree->channel); 450 if (unlikely(!abi16))
236 if (IS_ERR(chan)) 451 return -ENOMEM;
237 return PTR_ERR(chan); 452
453 list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
454 if (chan->chan->handle == (NVDRM_CHAN | fini->channel))
455 break;
456 chan = NULL;
457 }
458
459 if (!chan)
460 return nouveau_abi16_put(abi16, -ENOENT);
238 461
239 /* Synchronize with the user channel */ 462 /* synchronize with the user channel and destroy the gpu object */
240 nouveau_channel_idle(chan); 463 nouveau_channel_idle(chan->chan);
241 464
242 ret = nouveau_ramht_remove(chan, objfree->handle); 465 ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle);
243 nouveau_channel_put(&chan); 466 if (ret)
244 return ret; 467 return nouveau_abi16_put(abi16, ret);
468
469 /* cleanup extra state if this object was a notifier */
470 list_for_each_entry(ntfy, &chan->notifiers, head) {
471 if (ntfy->handle == fini->handle) {
472 nouveau_mm_free(&chan->heap, &ntfy->node);
473 list_del(&ntfy->head);
474 break;
475 }
476 }
477
478 return nouveau_abi16_put(abi16, 0);
245} 479}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index e6328b008a8c..90004081a501 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -3,6 +3,7 @@
3 3
4#define ABI16_IOCTL_ARGS \ 4#define ABI16_IOCTL_ARGS \
5 struct drm_device *dev, void *data, struct drm_file *file_priv 5 struct drm_device *dev, void *data, struct drm_file *file_priv
6
6int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS); 7int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS);
7int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS); 8int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS);
8int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS); 9int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS);
@@ -11,6 +12,37 @@ int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS);
11int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS); 12int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS);
12int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS); 13int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
13 14
15struct nouveau_abi16_ntfy {
16 struct list_head head;
17 struct nouveau_mm_node *node;
18 u32 handle;
19};
20
21struct nouveau_abi16_chan {
22 struct list_head head;
23 struct nouveau_channel *chan;
24 struct list_head notifiers;
25 struct nouveau_bo *ntfy;
26 struct nouveau_vma ntfy_vma;
27 struct nouveau_mm heap;
28};
29
30struct nouveau_abi16 {
31 struct nouveau_object *client;
32 struct nouveau_object *device;
33 struct list_head channels;
34 u64 handles;
35};
36
37struct nouveau_drm;
38struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *);
39int nouveau_abi16_put(struct nouveau_abi16 *, int);
40void nouveau_abi16_fini(struct nouveau_abi16 *);
41u16 nouveau_abi16_swclass(struct nouveau_drm *);
42
43#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
44#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
45
14struct drm_nouveau_channel_alloc { 46struct drm_nouveau_channel_alloc {
15 uint32_t fb_ctxdma_handle; 47 uint32_t fb_ctxdma_handle;
16 uint32_t tt_ctxdma_handle; 48 uint32_t tt_ctxdma_handle;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index dea42bc515ec..48783e14114c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -7,15 +7,13 @@
7#include <acpi/acpi.h> 7#include <acpi/acpi.h>
8#include <linux/mxm-wmi.h> 8#include <linux/mxm-wmi.h>
9 9
10#include <drm/drmP.h>
11#include <drm/drm_crtc_helper.h>
12#include "nouveau_drv.h"
13#include <drm/nouveau_drm.h>
14#include "nv50_display.h"
15#include "nouveau_connector.h"
16
17#include <linux/vga_switcheroo.h> 10#include <linux/vga_switcheroo.h>
18 11
12#include <drm/drm_edid.h>
13
14#include "nouveau_drm.h"
15#include "nouveau_acpi.h"
16
19#define NOUVEAU_DSM_LED 0x02 17#define NOUVEAU_DSM_LED 0x02
20#define NOUVEAU_DSM_LED_STATE 0x00 18#define NOUVEAU_DSM_LED_STATE 0x00
21#define NOUVEAU_DSM_LED_OFF 0x10 19#define NOUVEAU_DSM_LED_OFF 0x10
@@ -388,10 +386,9 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
388 return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); 386 return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
389} 387}
390 388
391int 389void *
392nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) 390nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
393{ 391{
394 struct nouveau_connector *nv_connector = nouveau_connector(connector);
395 struct acpi_device *acpidev; 392 struct acpi_device *acpidev;
396 acpi_handle handle; 393 acpi_handle handle;
397 int type, ret; 394 int type, ret;
@@ -403,21 +400,20 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
403 type = ACPI_VIDEO_DISPLAY_LCD; 400 type = ACPI_VIDEO_DISPLAY_LCD;
404 break; 401 break;
405 default: 402 default:
406 return -EINVAL; 403 return NULL;
407 } 404 }
408 405
409 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); 406 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
410 if (!handle) 407 if (!handle)
411 return -ENODEV; 408 return NULL;
412 409
413 ret = acpi_bus_get_device(handle, &acpidev); 410 ret = acpi_bus_get_device(handle, &acpidev);
414 if (ret) 411 if (ret)
415 return -ENODEV; 412 return NULL;
416 413
417 ret = acpi_video_get_edid(acpidev, type, -1, &edid); 414 ret = acpi_video_get_edid(acpidev, type, -1, &edid);
418 if (ret < 0) 415 if (ret < 0)
419 return ret; 416 return NULL;
420 417
421 nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL); 418 return kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
422 return 0;
423} 419}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
new file mode 100644
index 000000000000..08af67722b57
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -0,0 +1,22 @@
1#ifndef __NOUVEAU_ACPI_H__
2#define __NOUVEAU_ACPI_H__
3
4#define ROM_BIOS_PAGE 4096
5
6#if defined(CONFIG_ACPI)
7void nouveau_register_dsm_handler(void);
8void nouveau_unregister_dsm_handler(void);
9void nouveau_switcheroo_optimus_dsm(void);
10int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
11bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
12void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
13#else
14static inline void nouveau_register_dsm_handler(void) {}
15static inline void nouveau_unregister_dsm_handler(void) {}
16static inline void nouveau_switcheroo_optimus_dsm(void) {}
17static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
18static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
19static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
20#endif
21
22#endif
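The new header follows the usual pattern for optional subsystems: each prototype is paired with a static-inline stub behind the config switch, so callers never need #ifdef CONFIG_ACPI and the failure values (-EINVAL, false, NULL) fall out naturally when the feature is compiled out. An illustrative sketch of the same pattern with a hypothetical HAVE_WIDGET flag:

#ifndef __WIDGET_H__
#define __WIDGET_H__

#if defined(HAVE_WIDGET)
int widget_probe(void);
void *widget_read_table(void);
#else
/* stubs: compile away cleanly when the feature is disabled */
static inline int widget_probe(void) { return -1; }
static inline void *widget_read_table(void) { return (void *)0; }
#endif

#endif /* __WIDGET_H__ */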
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
new file mode 100644
index 000000000000..d28430cd2ba6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -0,0 +1,152 @@
1#include <linux/module.h>
2
3#include <core/device.h>
4
5#include "nouveau_drm.h"
6#include "nouveau_agp.h"
7#include "nouveau_reg.h"
8
9#if __OS_HAS_AGP
10MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
11static int nouveau_agpmode = -1;
12module_param_named(agpmode, nouveau_agpmode, int, 0400);
13
14static unsigned long
15get_agp_mode(struct nouveau_drm *drm, unsigned long mode)
16{
17 struct nouveau_device *device = nv_device(drm->device);
18
19 /*
20 * FW seems to be broken on nv18, it makes the card lock up
21 * randomly.
22 */
23 if (device->chipset == 0x18)
24 mode &= ~PCI_AGP_COMMAND_FW;
25
26 /*
27 * AGP mode set in the command line.
28 */
29 if (nouveau_agpmode > 0) {
30 bool agpv3 = mode & 0x8;
31 int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
32
33 mode = (mode & ~0x7) | (rate & 0x7);
34 }
35
36 return mode;
37}
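get_agp_mode() folds the agpmode= override into the bridge's mode word. AGP 3.x encodes speed in units of 4x (bit 0 = 4x, bit 1 = 8x), hence the divide-by-four when the v3 bit (0x8) is set; older bridges use the requested rate directly. A standalone sketch of just that arithmetic:

#include <stdio.h>

static unsigned long agp_apply_mode(unsigned long mode, int agpmode)
{
	if (agpmode > 0) {
		int agpv3 = mode & 0x8;          /* AGP 3.0 capable? */
		int rate  = agpv3 ? agpmode / 4 : agpmode;

		mode = (mode & ~0x7UL) | (rate & 0x7);
	}
	return mode;
}

int main(void)
{
	/* v3 bridge: requesting 8x yields rate bits 0x2 */
	printf("0x%lx\n", agp_apply_mode(0x1f000208UL, 8));
	/* pre-3.0 bridge: the rate value is used as-is */
	printf("0x%lx\n", agp_apply_mode(0x1f000204UL, 2));
	return 0;
}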
38
39static bool
40nouveau_agp_enabled(struct nouveau_drm *drm)
41{
42 struct drm_device *dev = drm->dev;
43
44 if (!drm_pci_device_is_agp(dev) || !dev->agp)
45 return false;
46
47 if (drm->agp.stat == UNKNOWN) {
48 if (!nouveau_agpmode)
49 return false;
50 return true;
51 }
52
53 return (drm->agp.stat == ENABLED);
54}
55#endif
56
57void
58nouveau_agp_reset(struct nouveau_drm *drm)
59{
60#if __OS_HAS_AGP
61 struct nouveau_device *device = nv_device(drm->device);
62 struct drm_device *dev = drm->dev;
63 u32 save[2];
64 int ret;
65
66 if (!nouveau_agp_enabled(drm))
67 return;
68
69 /* First of all, disable fast writes, otherwise if it's
70 * already enabled in the AGP bridge and we disable the card's
71 * AGP controller we might be locking ourselves out of it. */
72 if ((nv_rd32(device, NV04_PBUS_PCI_NV_19) |
73 dev->agp->mode) & PCI_AGP_COMMAND_FW) {
74 struct drm_agp_info info;
75 struct drm_agp_mode mode;
76
77 ret = drm_agp_info(dev, &info);
78 if (ret)
79 return;
80
81 mode.mode = get_agp_mode(drm, info.mode);
82 mode.mode &= ~PCI_AGP_COMMAND_FW;
83
84 ret = drm_agp_enable(dev, mode);
85 if (ret)
86 return;
87 }
88
89
90 /* clear busmaster bit, and disable AGP */
91 save[0] = nv_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
92 nv_wr32(device, NV04_PBUS_PCI_NV_19, 0);
93
94 /* reset PGRAPH, PFIFO and PTIMER */
95 save[1] = nv_mask(device, 0x000200, 0x00011100, 0x00000000);
96 nv_mask(device, 0x000200, 0x00011100, save[1]);
97
 98 /* and restore busmaster bit (gives effect of resetting AGP) */
99 nv_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
100#endif
101}
102
103void
104nouveau_agp_init(struct nouveau_drm *drm)
105{
106#if __OS_HAS_AGP
107 struct nouveau_device *device = nv_device(drm->device);
108 struct drm_device *dev = drm->dev;
109 struct drm_agp_info info;
110 struct drm_agp_mode mode;
111 int ret;
112
113 if (!nouveau_agp_enabled(drm))
114 return;
115 drm->agp.stat = DISABLE;
116
117 ret = drm_agp_acquire(dev);
118 if (ret) {
119 nv_error(device, "unable to acquire AGP: %d\n", ret);
120 return;
121 }
122
123 ret = drm_agp_info(dev, &info);
124 if (ret) {
125 nv_error(device, "unable to get AGP info: %d\n", ret);
126 return;
127 }
128
129 /* see agp.h for the AGPSTAT_* modes available */
130 mode.mode = get_agp_mode(drm, info.mode);
131
132 ret = drm_agp_enable(dev, mode);
133 if (ret) {
134 nv_error(device, "unable to enable AGP: %d\n", ret);
135 return;
136 }
137
138 drm->agp.stat = ENABLED;
139 drm->agp.base = info.aperture_base;
140 drm->agp.size = info.aperture_size;
141#endif
142}
143
144void
145nouveau_agp_fini(struct nouveau_drm *drm)
146{
147#if __OS_HAS_AGP
148 struct drm_device *dev = drm->dev;
149 if (dev->agp && dev->agp->acquired)
150 drm_agp_release(dev);
151#endif
152}
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.h b/drivers/gpu/drm/nouveau/nouveau_agp.h
new file mode 100644
index 000000000000..b55c08652963
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.h
@@ -0,0 +1,10 @@
1#ifndef __NOUVEAU_AGP_H__
2#define __NOUVEAU_AGP_H__
3
4struct nouveau_drm;
5
6void nouveau_agp_reset(struct nouveau_drm *);
7void nouveau_agp_init(struct nouveau_drm *);
8void nouveau_agp_fini(struct nouveau_drm *);
9
10#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 2036748e56b4..f65b20a375f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -33,18 +33,17 @@
33#include <linux/backlight.h> 33#include <linux/backlight.h>
34#include <linux/acpi.h> 34#include <linux/acpi.h>
35 35
36#include <drm/drmP.h> 36#include "nouveau_drm.h"
37#include "nouveau_drv.h"
38#include <drm/nouveau_drm.h>
39#include "nouveau_reg.h" 37#include "nouveau_reg.h"
40#include "nouveau_encoder.h" 38#include "nouveau_encoder.h"
41 39
42static int 40static int
43nv40_get_intensity(struct backlight_device *bd) 41nv40_get_intensity(struct backlight_device *bd)
44{ 42{
45 struct drm_device *dev = bl_get_data(bd); 43 struct nouveau_drm *drm = bl_get_data(bd);
46 int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK) 44 struct nouveau_device *device = nv_device(drm->device);
47 >> 16; 45 int val = (nv_rd32(device, NV40_PMC_BACKLIGHT) &
46 NV40_PMC_BACKLIGHT_MASK) >> 16;
48 47
49 return val; 48 return val;
50} 49}
@@ -52,11 +51,12 @@ nv40_get_intensity(struct backlight_device *bd)
52static int 51static int
53nv40_set_intensity(struct backlight_device *bd) 52nv40_set_intensity(struct backlight_device *bd)
54{ 53{
55 struct drm_device *dev = bl_get_data(bd); 54 struct nouveau_drm *drm = bl_get_data(bd);
55 struct nouveau_device *device = nv_device(drm->device);
56 int val = bd->props.brightness; 56 int val = bd->props.brightness;
57 int reg = nv_rd32(dev, NV40_PMC_BACKLIGHT); 57 int reg = nv_rd32(device, NV40_PMC_BACKLIGHT);
58 58
59 nv_wr32(dev, NV40_PMC_BACKLIGHT, 59 nv_wr32(device, NV40_PMC_BACKLIGHT,
60 (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK)); 60 (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
61 61
62 return 0; 62 return 0;
@@ -71,23 +71,20 @@ static const struct backlight_ops nv40_bl_ops = {
71static int 71static int
72nv40_backlight_init(struct drm_connector *connector) 72nv40_backlight_init(struct drm_connector *connector)
73{ 73{
74 struct drm_device *dev = connector->dev; 74 struct nouveau_drm *drm = nouveau_drm(connector->dev);
75 struct drm_nouveau_private *dev_priv = dev->dev_private; 75 struct nouveau_device *device = nv_device(drm->device);
76 struct backlight_properties props; 76 struct backlight_properties props;
77 struct backlight_device *bd; 77 struct backlight_device *bd;
78 78
79 if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)) 79 if (!(nv_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
80 return 0; 80 return 0;
81 81
82 memset(&props, 0, sizeof(struct backlight_properties)); 82 memset(&props, 0, sizeof(struct backlight_properties));
83 props.type = BACKLIGHT_RAW; 83 props.type = BACKLIGHT_RAW;
84 props.max_brightness = 31; 84 props.max_brightness = 31;
85 bd = backlight_device_register("nv_backlight", &connector->kdev, dev, 85 bd = backlight_device_register("nv_backlight", &connector->kdev, drm,
86 &nv40_bl_ops, &props); 86 &nv40_bl_ops, &props);
87 if (IS_ERR(bd)) 87 drm->backlight = bd;
88 return PTR_ERR(bd);
89
90 dev_priv->backlight = bd;
91 bd->props.brightness = nv40_get_intensity(bd); 88 bd->props.brightness = nv40_get_intensity(bd);
92 backlight_update_status(bd); 89 backlight_update_status(bd);
93 90
@@ -98,12 +95,13 @@ static int
98nv50_get_intensity(struct backlight_device *bd) 95nv50_get_intensity(struct backlight_device *bd)
99{ 96{
100 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 97 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
101 struct drm_device *dev = nv_encoder->base.base.dev; 98 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
99 struct nouveau_device *device = nv_device(drm->device);
102 int or = nv_encoder->or; 100 int or = nv_encoder->or;
103 u32 div = 1025; 101 u32 div = 1025;
104 u32 val; 102 u32 val;
105 103
106 val = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or)); 104 val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
107 val &= NV50_PDISP_SOR_PWM_CTL_VAL; 105 val &= NV50_PDISP_SOR_PWM_CTL_VAL;
108 return ((val * 100) + (div / 2)) / div; 106 return ((val * 100) + (div / 2)) / div;
109} 107}
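The intensity conversions work against a fixed divider (div = 1025 on NV50): adding div/2 before the divide rounds the duty cycle to the nearest percent, so a get after a set round-trips cleanly. A small sketch of both directions:

#include <stdio.h>
#include <stdint.h>

static uint32_t pwm_to_percent(uint32_t val, uint32_t div)
{
	return ((val * 100) + (div / 2)) / div;  /* round to nearest */
}

static uint32_t percent_to_pwm(uint32_t pct, uint32_t div)
{
	return (pct * div) / 100;
}

int main(void)
{
	uint32_t div = 1025;                     /* fixed NV50 divider */
	printf("%u\n", pwm_to_percent(512, div));                     /* 50 */
	printf("%u\n", pwm_to_percent(percent_to_pwm(73, div), div)); /* 73 */
	return 0;
}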
@@ -112,13 +110,14 @@ static int
112nv50_set_intensity(struct backlight_device *bd) 110nv50_set_intensity(struct backlight_device *bd)
113{ 111{
114 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 112 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
115 struct drm_device *dev = nv_encoder->base.base.dev; 113 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
114 struct nouveau_device *device = nv_device(drm->device);
116 int or = nv_encoder->or; 115 int or = nv_encoder->or;
117 u32 div = 1025; 116 u32 div = 1025;
118 u32 val = (bd->props.brightness * div) / 100; 117 u32 val = (bd->props.brightness * div) / 100;
119 118
120 nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or), 119 nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
121 NV50_PDISP_SOR_PWM_CTL_NEW | val); 120 NV50_PDISP_SOR_PWM_CTL_NEW | val);
122 return 0; 121 return 0;
123} 122}
124 123
@@ -132,12 +131,13 @@ static int
132nva3_get_intensity(struct backlight_device *bd) 131nva3_get_intensity(struct backlight_device *bd)
133{ 132{
134 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 133 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
135 struct drm_device *dev = nv_encoder->base.base.dev; 134 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
135 struct nouveau_device *device = nv_device(drm->device);
136 int or = nv_encoder->or; 136 int or = nv_encoder->or;
137 u32 div, val; 137 u32 div, val;
138 138
139 div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or)); 139 div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
140 val = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or)); 140 val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
141 val &= NVA3_PDISP_SOR_PWM_CTL_VAL; 141 val &= NVA3_PDISP_SOR_PWM_CTL_VAL;
142 if (div && div >= val) 142 if (div && div >= val)
143 return ((val * 100) + (div / 2)) / div; 143 return ((val * 100) + (div / 2)) / div;
@@ -149,16 +149,17 @@ static int
149nva3_set_intensity(struct backlight_device *bd) 149nva3_set_intensity(struct backlight_device *bd)
150{ 150{
151 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 151 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
152 struct drm_device *dev = nv_encoder->base.base.dev; 152 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
153 struct nouveau_device *device = nv_device(drm->device);
153 int or = nv_encoder->or; 154 int or = nv_encoder->or;
154 u32 div, val; 155 u32 div, val;
155 156
156 div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or)); 157 div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
157 val = (bd->props.brightness * div) / 100; 158 val = (bd->props.brightness * div) / 100;
158 if (div) { 159 if (div) {
159 nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or), val | 160 nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val |
160 NV50_PDISP_SOR_PWM_CTL_NEW | 161 NV50_PDISP_SOR_PWM_CTL_NEW |
161 NVA3_PDISP_SOR_PWM_CTL_UNK); 162 NVA3_PDISP_SOR_PWM_CTL_UNK);
162 return 0; 163 return 0;
163 } 164 }
164 165
@@ -174,26 +175,26 @@ static const struct backlight_ops nva3_bl_ops = {
174static int 175static int
175nv50_backlight_init(struct drm_connector *connector) 176nv50_backlight_init(struct drm_connector *connector)
176{ 177{
177 struct drm_device *dev = connector->dev; 178 struct nouveau_drm *drm = nouveau_drm(connector->dev);
178 struct drm_nouveau_private *dev_priv = dev->dev_private; 179 struct nouveau_device *device = nv_device(drm->device);
179 struct nouveau_encoder *nv_encoder; 180 struct nouveau_encoder *nv_encoder;
180 struct backlight_properties props; 181 struct backlight_properties props;
181 struct backlight_device *bd; 182 struct backlight_device *bd;
182 const struct backlight_ops *ops; 183 const struct backlight_ops *ops;
183 184
184 nv_encoder = find_encoder(connector, OUTPUT_LVDS); 185 nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
185 if (!nv_encoder) { 186 if (!nv_encoder) {
186 nv_encoder = find_encoder(connector, OUTPUT_DP); 187 nv_encoder = find_encoder(connector, DCB_OUTPUT_DP);
187 if (!nv_encoder) 188 if (!nv_encoder)
188 return -ENODEV; 189 return -ENODEV;
189 } 190 }
190 191
191 if (!nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) 192 if (!nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
192 return 0; 193 return 0;
193 194
194 if (dev_priv->chipset <= 0xa0 || 195 if (device->chipset <= 0xa0 ||
195 dev_priv->chipset == 0xaa || 196 device->chipset == 0xaa ||
196 dev_priv->chipset == 0xac) 197 device->chipset == 0xac)
197 ops = &nv50_bl_ops; 198 ops = &nv50_bl_ops;
198 else 199 else
199 ops = &nva3_bl_ops; 200 ops = &nva3_bl_ops;
@@ -206,7 +207,7 @@ nv50_backlight_init(struct drm_connector *connector)
206 if (IS_ERR(bd)) 207 if (IS_ERR(bd))
207 return PTR_ERR(bd); 208 return PTR_ERR(bd);
208 209
209 dev_priv->backlight = bd; 210 drm->backlight = bd;
210 bd->props.brightness = bd->ops->get_brightness(bd); 211 bd->props.brightness = bd->ops->get_brightness(bd);
211 backlight_update_status(bd); 212 backlight_update_status(bd);
212 return 0; 213 return 0;
@@ -215,12 +216,13 @@ nv50_backlight_init(struct drm_connector *connector)
215int 216int
216nouveau_backlight_init(struct drm_device *dev) 217nouveau_backlight_init(struct drm_device *dev)
217{ 218{
218 struct drm_nouveau_private *dev_priv = dev->dev_private; 219 struct nouveau_drm *drm = nouveau_drm(dev);
220 struct nouveau_device *device = nv_device(drm->device);
219 struct drm_connector *connector; 221 struct drm_connector *connector;
220 222
221#ifdef CONFIG_ACPI 223#ifdef CONFIG_ACPI
222 if (acpi_video_backlight_support()) { 224 if (acpi_video_backlight_support()) {
223 NV_INFO(dev, "ACPI backlight interface available, " 225 NV_INFO(drm, "ACPI backlight interface available, "
224 "not registering our own\n"); 226 "not registering our own\n");
225 return 0; 227 return 0;
226 } 228 }
@@ -231,7 +233,7 @@ nouveau_backlight_init(struct drm_device *dev)
231 connector->connector_type != DRM_MODE_CONNECTOR_eDP) 233 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
232 continue; 234 continue;
233 235
234 switch (dev_priv->card_type) { 236 switch (device->card_type) {
235 case NV_40: 237 case NV_40:
236 return nv40_backlight_init(connector); 238 return nv40_backlight_init(connector);
237 case NV_50: 239 case NV_50:
@@ -248,10 +250,10 @@ nouveau_backlight_init(struct drm_device *dev)
248void 250void
249nouveau_backlight_exit(struct drm_device *dev) 251nouveau_backlight_exit(struct drm_device *dev)
250{ 252{
251 struct drm_nouveau_private *dev_priv = dev->dev_private; 253 struct nouveau_drm *drm = nouveau_drm(dev);
252 254
253 if (dev_priv->backlight) { 255 if (drm->backlight) {
254 backlight_device_unregister(dev_priv->backlight); 256 backlight_device_unregister(drm->backlight);
255 dev_priv->backlight = NULL; 257 drm->backlight = NULL;
256 } 258 }
257} 259}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index a84290562ca7..09fdef235882 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -22,12 +22,14 @@
22 * SOFTWARE. 22 * SOFTWARE.
23 */ 23 */
24 24
25#include <subdev/bios.h>
26
25#include <drm/drmP.h> 27#include <drm/drmP.h>
26#define NV_DEBUG_NOTRACE 28
27#include "nouveau_drv.h" 29#include "nouveau_drm.h"
30#include "nouveau_reg.h"
28#include "nouveau_hw.h" 31#include "nouveau_hw.h"
29#include "nouveau_encoder.h" 32#include "nouveau_encoder.h"
30#include "nouveau_gpio.h"
31 33
32#include <linux/io-mapping.h> 34#include <linux/io-mapping.h>
33#include <linux/firmware.h> 35#include <linux/firmware.h>
@@ -65,3677 +67,6 @@ static bool nv_cksum(const uint8_t *data, unsigned int length)
65 return false; 67 return false;
66} 68}
67 69
68static int
69score_vbios(struct nvbios *bios, const bool writeable)
70{
71 if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) {
72 NV_TRACEWARN(bios->dev, "... BIOS signature not found\n");
73 return 0;
74 }
75
76 if (nv_cksum(bios->data, bios->data[2] * 512)) {
77 NV_TRACEWARN(bios->dev, "... BIOS checksum invalid\n");
78 /* if a ro image is somewhat bad, it's probably all rubbish */
79 return writeable ? 2 : 1;
80 }
81
82 NV_TRACE(bios->dev, "... appears to be valid\n");
83 return 3;
84}
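score_vbios(), deleted here along with the rest of the shadowing code as it moves to subdev/bios, ranks candidate images: 0 without the 0x55AA signature, 1 or 2 for a failed checksum over data[2] * 512 bytes (read-only sources score lower, since a corrupt RO image is probably all rubbish), and 3 for a fully valid image. A standalone sketch of that scoring:

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

static int score_image(const uint8_t *data, size_t len, bool writeable)
{
	size_t image_len, i;
	uint8_t sum = 0;

	if (!data || len < 3 || data[0] != 0x55 || data[1] != 0xAA)
		return 0;                      /* no BIOS signature */

	image_len = (size_t)data[2] * 512;     /* length in 512-byte units */
	if (image_len > len)
		return 0;

	for (i = 0; i < image_len; i++)
		sum += data[i];                /* 8-bit sum must wrap to 0 */
	if (sum)
		return writeable ? 2 : 1;      /* checksum invalid */

	return 3;                              /* appears to be valid */
}

int main(void)
{
	uint8_t img[1024] = { 0x55, 0xAA, 0x02 };  /* claims 2*512 bytes */
	img[3] = (uint8_t)-(0x55 + 0xAA + 0x02);   /* fix up the checksum */
	return score_image(img, sizeof(img), true) == 3 ? 0 : 1;
}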
85
86static void
87bios_shadow_prom(struct nvbios *bios)
88{
89 struct drm_device *dev = bios->dev;
90 struct drm_nouveau_private *dev_priv = dev->dev_private;
91 u32 pcireg, access;
92 u16 pcir;
93 int i;
94
95 /* enable access to rom */
96 if (dev_priv->card_type >= NV_50)
97 pcireg = 0x088050;
98 else
99 pcireg = NV_PBUS_PCI_NV_20;
100 access = nv_mask(dev, pcireg, 0x00000001, 0x00000000);
101
102 /* bail if no rom signature, with a workaround for a PROM reading
103 * issue on some chipsets. the first read after a period of
104 * inactivity returns the wrong result, so retry the first header
105 * byte a few times before giving up as a workaround
106 */
107 i = 16;
108 do {
109 if (nv_rd08(dev, NV_PROM_OFFSET + 0) == 0x55)
110 break;
111 } while (i--);
112
113 if (!i || nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
114 goto out;
115
116 /* additional check (see note below) - read PCI record header */
117 pcir = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
118 nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
119 if (nv_rd08(dev, NV_PROM_OFFSET + pcir + 0) != 'P' ||
120 nv_rd08(dev, NV_PROM_OFFSET + pcir + 1) != 'C' ||
121 nv_rd08(dev, NV_PROM_OFFSET + pcir + 2) != 'I' ||
122 nv_rd08(dev, NV_PROM_OFFSET + pcir + 3) != 'R')
123 goto out;
124
125 /* read entire bios image to system memory */
126 bios->length = nv_rd08(dev, NV_PROM_OFFSET + 2) * 512;
127 bios->data = kmalloc(bios->length, GFP_KERNEL);
128 if (bios->data) {
129 for (i = 0; i < bios->length; i++)
130 bios->data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
131 }
132
133out:
134 /* disable access to rom */
135 nv_wr32(dev, pcireg, access);
136}
137
138static void
139bios_shadow_pramin(struct nvbios *bios)
140{
141 struct drm_device *dev = bios->dev;
142 struct drm_nouveau_private *dev_priv = dev->dev_private;
143 u32 bar0 = 0;
144 int i;
145
146 if (dev_priv->card_type >= NV_50) {
147 u64 addr = (u64)(nv_rd32(dev, 0x619f04) & 0xffffff00) << 8;
148 if (!addr) {
149 addr = (u64)nv_rd32(dev, 0x001700) << 16;
150 addr += 0xf0000;
151 }
152
153 bar0 = nv_mask(dev, 0x001700, 0xffffffff, addr >> 16);
154 }
155
156 /* bail if no rom signature */
157 if (nv_rd08(dev, NV_PRAMIN_OFFSET + 0) != 0x55 ||
158 nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa)
159 goto out;
160
161 bios->length = nv_rd08(dev, NV_PRAMIN_OFFSET + 2) * 512;
162 bios->data = kmalloc(bios->length, GFP_KERNEL);
163 if (bios->data) {
164 for (i = 0; i < bios->length; i++)
165 bios->data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
166 }
167
168out:
169 if (dev_priv->card_type >= NV_50)
170 nv_wr32(dev, 0x001700, bar0);
171}
172
173static void
174bios_shadow_pci(struct nvbios *bios)
175{
176 struct pci_dev *pdev = bios->dev->pdev;
177 size_t length;
178
179 if (!pci_enable_rom(pdev)) {
180 void __iomem *rom = pci_map_rom(pdev, &length);
181 if (rom && length) {
182 bios->data = kmalloc(length, GFP_KERNEL);
183 if (bios->data) {
184 memcpy_fromio(bios->data, rom, length);
185 bios->length = length;
186 }
187 }
188 if (rom)
189 pci_unmap_rom(pdev, rom);
190
191 pci_disable_rom(pdev);
192 }
193}
194
195static void
196bios_shadow_acpi(struct nvbios *bios)
197{
198 struct pci_dev *pdev = bios->dev->pdev;
199 int cnt = 65536 / ROM_BIOS_PAGE;
200 int ret;
201
202 if (!nouveau_acpi_rom_supported(pdev))
203 return;
204
205 bios->data = kmalloc(cnt * ROM_BIOS_PAGE, GFP_KERNEL);
206 if (!bios->data)
207 return;
208
209 bios->length = 0;
210 while (cnt--) {
211 ret = nouveau_acpi_get_bios_chunk(bios->data, bios->length,
212 ROM_BIOS_PAGE);
213 if (ret != ROM_BIOS_PAGE)
214 return;
215
216 bios->length += ROM_BIOS_PAGE;
217 }
218}
219
220struct methods {
221 const char desc[8];
222 void (*shadow)(struct nvbios *);
223 const bool rw;
224 int score;
225 u32 size;
226 u8 *data;
227};
228
229static bool
230bios_shadow(struct drm_device *dev)
231{
232 struct methods shadow_methods[] = {
233 { "PRAMIN", bios_shadow_pramin, true, 0, 0, NULL },
234 { "PROM", bios_shadow_prom, false, 0, 0, NULL },
235 { "ACPI", bios_shadow_acpi, true, 0, 0, NULL },
236 { "PCIROM", bios_shadow_pci, true, 0, 0, NULL },
237 {}
238 };
239 struct drm_nouveau_private *dev_priv = dev->dev_private;
240 struct nvbios *bios = &dev_priv->vbios;
241 struct methods *mthd, *best;
242 const struct firmware *fw;
243 char fname[32];
244 int ret;
245
246 if (nouveau_vbios) {
247 /* try to match one of the built-in methods */
248 mthd = shadow_methods;
249 do {
250 if (strcasecmp(nouveau_vbios, mthd->desc))
251 continue;
252 NV_INFO(dev, "VBIOS source: %s\n", mthd->desc);
253
254 mthd->shadow(bios);
255 mthd->score = score_vbios(bios, mthd->rw);
256 if (mthd->score)
257 return true;
258 } while ((++mthd)->shadow);
259
260 /* attempt to load firmware image */
261 snprintf(fname, sizeof(fname), "nouveau/%s", nouveau_vbios);
262 ret = request_firmware(&fw, fname, &dev->pdev->dev);
263 if (ret == 0) {
264 bios->length = fw->size;
265 bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
266 release_firmware(fw);
267
268 NV_INFO(dev, "VBIOS image: %s\n", nouveau_vbios);
269 if (score_vbios(bios, 1))
270 return true;
271
272 kfree(bios->data);
273 bios->data = NULL;
274 }
275
276 NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
277 }
278
279 mthd = shadow_methods;
280 do {
281 NV_TRACE(dev, "Checking %s for VBIOS\n", mthd->desc);
282 mthd->shadow(bios);
283 mthd->score = score_vbios(bios, mthd->rw);
284 mthd->size = bios->length;
285 mthd->data = bios->data;
286 bios->data = NULL;
287 } while (mthd->score != 3 && (++mthd)->shadow);
288
289 mthd = shadow_methods;
290 best = mthd;
291 do {
292 if (mthd->score > best->score) {
293 kfree(best->data);
294 best = mthd;
295 }
296 } while ((++mthd)->shadow);
297
298 if (best->score) {
299 NV_TRACE(dev, "Using VBIOS from %s\n", best->desc);
300 bios->length = best->size;
301 bios->data = best->data;
302 return true;
303 }
304
305 NV_ERROR(dev, "No valid VBIOS image found\n");
306 return false;
307}
308
309struct init_tbl_entry {
310 char *name;
311 uint8_t id;
312 /* Return:
313 * > 0: success, length of opcode
314 * 0: success, but abort further parsing of table (INIT_DONE etc)
315 * < 0: failure, table parsing will be aborted
316 */
317 int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
318};
319
320static int parse_init_table(struct nvbios *, uint16_t, struct init_exec *);
321
322#define MACRO_INDEX_SIZE 2
323#define MACRO_SIZE 8
324#define CONDITION_SIZE 12
325#define IO_FLAG_CONDITION_SIZE 9
326#define IO_CONDITION_SIZE 5
327#define MEM_INIT_SIZE 66
328
329static void still_alive(void)
330{
331#if 0
332 sync();
333 mdelay(2);
334#endif
335}
336
337static uint32_t
338munge_reg(struct nvbios *bios, uint32_t reg)
339{
340 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
341 struct dcb_entry *dcbent = bios->display.output;
342
343 if (dev_priv->card_type < NV_50)
344 return reg;
345
346 if (reg & 0x80000000) {
347 BUG_ON(bios->display.crtc < 0);
348 reg += bios->display.crtc * 0x800;
349 }
350
351 if (reg & 0x40000000) {
352 BUG_ON(!dcbent);
353
354 reg += (ffs(dcbent->or) - 1) * 0x800;
355 if ((reg & 0x20000000) && !(dcbent->sorconf.link & 1))
356 reg += 0x00000080;
357 }
358
359 reg &= ~0xe0000000;
360 return reg;
361}
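munge_reg() resolves the script encoding used on NV50+: bit 31 selects a per-head register (add crtc * 0x800), bit 30 a per-OR register (add (ffs(or) - 1) * 0x800, with a further 0x80 for SOR link B when bit 29 is set), and the flag bits are stripped before the MMIO access. A standalone sketch with illustrative register values:

#include <stdio.h>
#include <stdint.h>
#include <strings.h>                           /* ffs() */

static uint32_t munge(uint32_t reg, int crtc, int or_mask, int link)
{
	if (reg & 0x80000000)                  /* per-head register */
		reg += crtc * 0x800;
	if (reg & 0x40000000) {                /* per-OR register */
		reg += (ffs(or_mask) - 1) * 0x800;
		if ((reg & 0x20000000) && !(link & 1))
			reg += 0x00000080;     /* SOR link B */
	}
	return reg & ~0xe0000000;              /* strip the flag bits */
}

int main(void)
{
	printf("0x%08x\n", munge(0x80610000, 1, 0, 0));   /* head 1: +0x800 */
	printf("0x%08x\n", munge(0x40610000, 0, 0x4, 0)); /* OR 2: +0x1000 */
	return 0;
}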
362
363static int
364valid_reg(struct nvbios *bios, uint32_t reg)
365{
366 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
367 struct drm_device *dev = bios->dev;
368
369 /* C51 has misaligned regs on purpose. Marvellous */
370 if (reg & 0x2 ||
371 (reg & 0x1 && dev_priv->vbios.chip_version != 0x51))
372 NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
373
374 /* warn on C51 regs that haven't been verified accessible in tracing */
375 if (reg & 0x1 && dev_priv->vbios.chip_version == 0x51 &&
376 reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
377 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
378 reg);
379
380 if (reg >= (8*1024*1024)) {
381 NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg);
382 return 0;
383 }
384
385 return 1;
386}
387
388static bool
389valid_idx_port(struct nvbios *bios, uint16_t port)
390{
391 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
392 struct drm_device *dev = bios->dev;
393
394 /*
395 * If adding more ports here, the read/write functions below will need
396 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
397 * used for the port in question
398 */
399 if (dev_priv->card_type < NV_50) {
400 if (port == NV_CIO_CRX__COLOR)
401 return true;
402 if (port == NV_VIO_SRX)
403 return true;
404 } else {
405 if (port == NV_CIO_CRX__COLOR)
406 return true;
407 }
408
409 NV_ERROR(dev, "========== unknown indexed io port 0x%04X ==========\n",
410 port);
411
412 return false;
413}
414
415static bool
416valid_port(struct nvbios *bios, uint16_t port)
417{
418 struct drm_device *dev = bios->dev;
419
420 /*
421 * If adding more ports here, the read/write functions below will need
422 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
423 * used for the port in question
424 */
425 if (port == NV_VIO_VSE2)
426 return true;
427
428 NV_ERROR(dev, "========== unknown io port 0x%04X ==========\n", port);
429
430 return false;
431}
432
433static uint32_t
434bios_rd32(struct nvbios *bios, uint32_t reg)
435{
436 uint32_t data;
437
438 reg = munge_reg(bios, reg);
439 if (!valid_reg(bios, reg))
440 return 0;
441
442 /*
443 * C51 sometimes uses regs with bit0 set in the address. For these
444 * cases there should exist a translation in a BIOS table to an IO
445 * port address which the BIOS uses for accessing the reg
446 *
447 * These only seem to appear for the power control regs to a flat panel,
448 * and the GPIO regs at 0x60081*. In C51 mmio traces the normal regs
449 * for 0x1308 and 0x1310 are used - hence the mask below. An S3
450 * suspend-resume mmio trace from a C51 will be required to see if this
451 * is true for the power microcode in 0x14.., or whether the direct IO
452 * port access method is needed
453 */
454 if (reg & 0x1)
455 reg &= ~0x1;
456
457 data = nv_rd32(bios->dev, reg);
458
459 BIOSLOG(bios, " Read: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
460
461 return data;
462}
463
464static void
465bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
466{
467 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
468
469 reg = munge_reg(bios, reg);
470 if (!valid_reg(bios, reg))
471 return;
472
473 /* see note in bios_rd32 */
474 if (reg & 0x1)
475 reg &= 0xfffffffe;
476
477 LOG_OLD_VALUE(bios_rd32(bios, reg));
478 BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
479
480 if (dev_priv->vbios.execute) {
481 still_alive();
482 nv_wr32(bios->dev, reg, data);
483 }
484}
485
486static uint8_t
487bios_idxprt_rd(struct nvbios *bios, uint16_t port, uint8_t index)
488{
489 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
490 struct drm_device *dev = bios->dev;
491 uint8_t data;
492
493 if (!valid_idx_port(bios, port))
494 return 0;
495
496 if (dev_priv->card_type < NV_50) {
497 if (port == NV_VIO_SRX)
498 data = NVReadVgaSeq(dev, bios->state.crtchead, index);
499 else /* assume NV_CIO_CRX__COLOR */
500 data = NVReadVgaCrtc(dev, bios->state.crtchead, index);
501 } else {
502 uint32_t data32;
503
504 data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
505 data = (data32 >> ((index & 3) << 3)) & 0xff;
506 }
507
508 BIOSLOG(bios, " Indexed IO read: Port: 0x%04X, Index: 0x%02X, "
509 "Head: 0x%02X, Data: 0x%02X\n",
510 port, index, bios->state.crtchead, data);
511 return data;
512}
513
514static void
515bios_idxprt_wr(struct nvbios *bios, uint16_t port, uint8_t index, uint8_t data)
516{
517 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
518 struct drm_device *dev = bios->dev;
519
520 if (!valid_idx_port(bios, port))
521 return;
522
523 /*
524 * The current head is maintained in the nvbios member state.crtchead.
525 * We trap changes to CR44 and update the head variable and hence the
526 * register set written.
527 * As CR44 only exists on CRTC0, we update crtchead to head0 in advance
528 * of the write, and to head1 after the write
529 */
530 if (port == NV_CIO_CRX__COLOR && index == NV_CIO_CRE_44 &&
531 data != NV_CIO_CRE_44_HEADB)
532 bios->state.crtchead = 0;
533
534 LOG_OLD_VALUE(bios_idxprt_rd(bios, port, index));
535 BIOSLOG(bios, " Indexed IO write: Port: 0x%04X, Index: 0x%02X, "
536 "Head: 0x%02X, Data: 0x%02X\n",
537 port, index, bios->state.crtchead, data);
538
539 if (bios->execute && dev_priv->card_type < NV_50) {
540 still_alive();
541 if (port == NV_VIO_SRX)
542 NVWriteVgaSeq(dev, bios->state.crtchead, index, data);
543 else /* assume NV_CIO_CRX__COLOR */
544 NVWriteVgaCrtc(dev, bios->state.crtchead, index, data);
545 } else
546 if (bios->execute) {
547 uint32_t data32, shift = (index & 3) << 3;
548
549 still_alive();
550
551 data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
552 data32 &= ~(0xff << shift);
553 data32 |= (data << shift);
554 bios_wr32(bios, NV50_PDISPLAY_VGACRTC(index & ~3), data32);
555 }
556
557 if (port == NV_CIO_CRX__COLOR &&
558 index == NV_CIO_CRE_44 && data == NV_CIO_CRE_44_HEADB)
559 bios->state.crtchead = 1;
560}
561
562static uint8_t
563bios_port_rd(struct nvbios *bios, uint16_t port)
564{
565 uint8_t data, head = bios->state.crtchead;
566
567 if (!valid_port(bios, port))
568 return 0;
569
570 data = NVReadPRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port);
571
572 BIOSLOG(bios, " IO read: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
573 port, head, data);
574
575 return data;
576}
577
578static void
579bios_port_wr(struct nvbios *bios, uint16_t port, uint8_t data)
580{
581 int head = bios->state.crtchead;
582
583 if (!valid_port(bios, port))
584 return;
585
586 LOG_OLD_VALUE(bios_port_rd(bios, port));
587 BIOSLOG(bios, " IO write: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
588 port, head, data);
589
590 if (!bios->execute)
591 return;
592
593 still_alive();
594 NVWritePRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port, data);
595}
596
597static bool
598io_flag_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
599{
600 /*
601 * The IO flag condition entry has 2 bytes for the CRTC port; 1 byte
602 * for the CRTC index; 1 byte for the mask to apply to the value
603 * retrieved from the CRTC; 1 byte for the shift right to apply to the
604 * masked CRTC value; 2 bytes for the offset to the flag array, to
605 * which the shifted value is added; 1 byte for the mask applied to the
606 * value read from the flag array; and 1 byte for the value to compare
607 * against the masked byte from the flag table.
608 */
609
610 uint16_t condptr = bios->io_flag_condition_tbl_ptr + cond * IO_FLAG_CONDITION_SIZE;
611 uint16_t crtcport = ROM16(bios->data[condptr]);
612 uint8_t crtcindex = bios->data[condptr + 2];
613 uint8_t mask = bios->data[condptr + 3];
614 uint8_t shift = bios->data[condptr + 4];
615 uint16_t flagarray = ROM16(bios->data[condptr + 5]);
616 uint8_t flagarraymask = bios->data[condptr + 7];
617 uint8_t cmpval = bios->data[condptr + 8];
618 uint8_t data;
619
620 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
621 "Shift: 0x%02X, FlagArray: 0x%04X, FAMask: 0x%02X, "
622 "Cmpval: 0x%02X\n",
623 offset, crtcport, crtcindex, mask, shift, flagarray, flagarraymask, cmpval);
624
625 data = bios_idxprt_rd(bios, crtcport, crtcindex);
626
627 data = bios->data[flagarray + ((data & mask) >> shift)];
628 data &= flagarraymask;
629
630 BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
631 offset, data, cmpval);
632
633 return (data == cmpval);
634}
635
636static bool
637bios_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
638{
639 /*
640 * The condition table entry has 4 bytes for the address of the
641 * register to check, 4 bytes for a mask to apply to the register and
642 * 4 for a test comparison value
643 */
644
645 uint16_t condptr = bios->condition_tbl_ptr + cond * CONDITION_SIZE;
646 uint32_t reg = ROM32(bios->data[condptr]);
647 uint32_t mask = ROM32(bios->data[condptr + 4]);
648 uint32_t cmpval = ROM32(bios->data[condptr + 8]);
649 uint32_t data;
650
651 BIOSLOG(bios, "0x%04X: Cond: 0x%02X, Reg: 0x%08X, Mask: 0x%08X\n",
652 offset, cond, reg, mask);
653
654 data = bios_rd32(bios, reg) & mask;
655
656 BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
657 offset, data, cmpval);
658
659 return (data == cmpval);
660}
661
662static bool
663io_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
664{
665 /*
666 * The IO condition entry has 2 bytes for the IO port address; 1 byte
667 * for the index to write to io_port; 1 byte for the mask to apply to
668 * the byte read from io_port+1; and 1 byte for the value to compare
669 * against the masked byte.
670 */
671
672 uint16_t condptr = bios->io_condition_tbl_ptr + cond * IO_CONDITION_SIZE;
673 uint16_t io_port = ROM16(bios->data[condptr]);
674 uint8_t port_index = bios->data[condptr + 2];
675 uint8_t mask = bios->data[condptr + 3];
676 uint8_t cmpval = bios->data[condptr + 4];
677
678 uint8_t data = bios_idxprt_rd(bios, io_port, port_index) & mask;
679
680 BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
681 offset, data, cmpval);
682
683 return (data == cmpval);
684}
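All three condition parsers pull multi-byte fields out of the BIOS image with ROM16()/ROM32(), which read little-endian values at arbitrary byte offsets. A self-contained sketch of those accessors decoding one made-up condition-table entry (reg, mask, cmpval, 4 bytes each):

#include <stdio.h>
#include <stdint.h>

static uint16_t rom16(const uint8_t *p)
{
	return (uint16_t)(p[0] | p[1] << 8);
}

static uint32_t rom32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	const uint8_t entry[12] = {
		0x00, 0x14, 0x00, 0x00,   /* reg    = 0x00001400 */
		0xff, 0x00, 0x00, 0x00,   /* mask   = 0x000000ff */
		0x55, 0x00, 0x00, 0x00,   /* cmpval = 0x00000055 */
	};
	printf("reg=0x%08x mask=0x%08x cmp=0x%08x\n",
	       rom32(&entry[0]), rom32(&entry[4]), rom32(&entry[8]));
	(void)rom16;                      /* ROM16() works the same way */
	return 0;
}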
685
686static int
687nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
688{
689 struct drm_nouveau_private *dev_priv = dev->dev_private;
690 struct nouveau_pll_vals pll;
691 struct pll_lims pll_limits;
692 u32 ctrl, mask, coef;
693 int ret;
694
695 ret = get_pll_limits(dev, reg, &pll_limits);
696 if (ret)
697 return ret;
698
699 clk = nouveau_calc_pll_mnp(dev, &pll_limits, clk, &pll);
700 if (!clk)
701 return -ERANGE;
702
703 coef = pll.N1 << 8 | pll.M1;
704 ctrl = pll.log2P << 16;
705 mask = 0x00070000;
706 if (reg == 0x004008) {
707 mask |= 0x01f80000;
708 ctrl |= (pll_limits.log2p_bias << 19);
709 ctrl |= (pll.log2P << 22);
710 }
711
712 if (!dev_priv->vbios.execute)
713 return 0;
714
715 nv_mask(dev, reg + 0, mask, ctrl);
716 nv_wr32(dev, reg + 4, coef);
717 return 0;
718}
719
720static int
721setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
722{
723 struct drm_device *dev = bios->dev;
724 struct drm_nouveau_private *dev_priv = dev->dev_private;
725 /* clk in kHz */
726 struct pll_lims pll_lim;
727 struct nouveau_pll_vals pllvals;
728 int ret;
729
730 if (dev_priv->card_type >= NV_50)
731 return nv50_pll_set(dev, reg, clk);
732
 733 /* high regs (such as in the mac g5 table) are not offset by -4 */
734 ret = get_pll_limits(dev, reg > 0x405c ? reg : reg - 4, &pll_lim);
735 if (ret)
736 return ret;
737
738 clk = nouveau_calc_pll_mnp(dev, &pll_lim, clk, &pllvals);
739 if (!clk)
740 return -ERANGE;
741
742 if (bios->execute) {
743 still_alive();
744 nouveau_hw_setpll(dev, reg, &pllvals);
745 }
746
747 return 0;
748}
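/*
 * Note on units: setPLL() takes its clock in kHz. The init opcodes store
 * frequencies in different units, so callers convert -- e.g.
 * INIT_IO_RESTRICT_PLL stores 10kHz steps and passes freq * 10, while
 * INIT_IO_RESTRICT_PLL2 and INIT_PLL2 already store kHz.
 */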
749
750static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
751{
752 struct drm_nouveau_private *dev_priv = dev->dev_private;
753 struct nvbios *bios = &dev_priv->vbios;
754
755 /*
756 * For the results of this function to be correct, CR44 must have been
757 * set (using bios_idxprt_wr to set crtchead), CR58 set for CR57 = 0,
758 * and the DCB table parsed, before the script calling the function is
759 * run. run_digital_op_script is an example of how to do such setup.
760 */
761
762 uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
763
764 if (dcb_entry >= bios->dcb.entries) {
765 NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
766 "(%02X)\n", dcb_entry);
767 dcb_entry = 0x7f; /* unused / invalid marker */
768 }
769
770 return dcb_entry;
771}
772
773static struct nouveau_i2c_chan *
774init_i2c_device_find(struct drm_device *dev, int i2c_index)
775{
776 if (i2c_index == 0xff) {
777 struct drm_nouveau_private *dev_priv = dev->dev_private;
778 struct dcb_table *dcb = &dev_priv->vbios.dcb;
779 /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
780 int idx = dcb_entry_idx_from_crtchead(dev);
781
782 i2c_index = NV_I2C_DEFAULT(0);
783 if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
784 i2c_index = NV_I2C_DEFAULT(1);
785 }
786
787 return nouveau_i2c_find(dev, i2c_index);
788}
789
790static uint32_t
791get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
792{
793 /*
794 * For mlv < 0x80, it is an index into a table of TMDS base addresses.
795 * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
796 * CR58 for CR57 = 0 to index a table of offsets to the basic
797 * 0x6808b0 address.
798 * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
799 * CR58 for CR57 = 0 to index a table of offsets to the basic
800 * 0x6808b0 address, and then XOR the offset with 8.
801 */
802
803 struct drm_nouveau_private *dev_priv = dev->dev_private;
804 struct nvbios *bios = &dev_priv->vbios;
805 const int pramdac_offset[13] = {
806 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
807 const uint32_t pramdac_table[4] = {
808 0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };
809
810 if (mlv >= 0x80) {
811 int dcb_entry, dacoffset;
812
813 /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
814 dcb_entry = dcb_entry_idx_from_crtchead(dev);
815 if (dcb_entry == 0x7f)
816 return 0;
817 dacoffset = pramdac_offset[bios->dcb.entry[dcb_entry].or];
818 if (mlv == 0x81)
819 dacoffset ^= 8;
820 return 0x6808b0 + dacoffset;
821 } else {
822 if (mlv >= ARRAY_SIZE(pramdac_table)) {
823 NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
824 mlv);
825 return 0;
826 }
827 return pramdac_table[mlv];
828 }
829}
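/*
 * Two examples of the mapping above (DCB "or" value assumed): mlv 0x01
 * returns pramdac_table[1] = 0x6808b8 directly; mlv 0x81 with or = 4
 * gives dacoffset = pramdac_offset[4] ^ 8 = 0x2000 ^ 8 = 0x2008, i.e.
 * register 0x6808b0 + 0x2008 = 0x6828b8.
 */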
830
831static int
832init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
833 struct init_exec *iexec)
834{
835 /*
836 * INIT_IO_RESTRICT_PROG opcode: 0x32 ('2')
837 *
838 * offset (8 bit): opcode
839 * offset + 1 (16 bit): CRTC port
840 * offset + 3 (8 bit): CRTC index
841 * offset + 4 (8 bit): mask
842 * offset + 5 (8 bit): shift
843 * offset + 6 (8 bit): count
844 * offset + 7 (32 bit): register
845 * offset + 11 (32 bit): configuration 1
846 * ...
847 *
848 * Starting at offset + 11 there are "count" 32 bit values.
849 * To find out which value to use read index "CRTC index" on "CRTC
850 * port", AND this value with "mask" and then bit shift right "shift"
851 * bits. Read the appropriate value using this index and write to
852 * "register"
853 */
854
855 uint16_t crtcport = ROM16(bios->data[offset + 1]);
856 uint8_t crtcindex = bios->data[offset + 3];
857 uint8_t mask = bios->data[offset + 4];
858 uint8_t shift = bios->data[offset + 5];
859 uint8_t count = bios->data[offset + 6];
860 uint32_t reg = ROM32(bios->data[offset + 7]);
861 uint8_t config;
862 uint32_t configval;
863 int len = 11 + count * 4;
864
865 if (!iexec->execute)
866 return len;
867
868 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
869 "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
870 offset, crtcport, crtcindex, mask, shift, count, reg);
871
872 config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
873 if (config > count) {
874 NV_ERROR(bios->dev,
875 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
876 offset, config, count);
877 return len;
878 }
879
880 configval = ROM32(bios->data[offset + 11 + config * 4]);
881
882 BIOSLOG(bios, "0x%04X: Writing config %02X\n", offset, config);
883
884 bios_wr32(bios, reg, configval);
885
886 return len;
887}
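/*
 * Example decode of the opcode above (byte values assumed): with
 * count = 2 the entry is len = 11 + 2 * 4 = 19 bytes long, and a masked,
 * shifted port read of 1 selects the second config word, i.e. the ROM32
 * at offset + 15.
 */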
888
889static int
890init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
891{
892 /*
893 * INIT_REPEAT opcode: 0x33 ('3')
894 *
895 * offset (8 bit): opcode
896 * offset + 1 (8 bit): count
897 *
898 * Execute script following this opcode up to INIT_REPEAT_END
899 * "count" times
900 */
901
902 uint8_t count = bios->data[offset + 1];
903 uint8_t i;
904
905 /* no iexec->execute check by design */
906
907 BIOSLOG(bios, "0x%04X: Repeating following segment %d times\n",
908 offset, count);
909
910 iexec->repeat = true;
911
912 /*
913 * count - 1, as the script block will execute once when we leave this
914 * opcode -- this is compatible with bios behaviour as:
915 * a) the block is always executed at least once, even if count == 0
916 * b) the bios interpreter skips to the op following INIT_END_REPEAT,
917 * while we don't
918 */
919 for (i = 0; i < count - 1; i++)
920 parse_init_table(bios, offset + 2, iexec);
921
922 iexec->repeat = false;
923
924 return 2;
925}
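/*
 * Example of the count - 1 loop above: for count = 3 the repeated block
 * runs twice via parse_init_table() here, then a third time when this
 * handler returns and the caller continues past offset + 2 into the
 * same block.
 */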
926
927static int
928init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
929 struct init_exec *iexec)
930{
931 /*
932 * INIT_IO_RESTRICT_PLL opcode: 0x34 ('4')
933 *
934 * offset (8 bit): opcode
935 * offset + 1 (16 bit): CRTC port
936 * offset + 3 (8 bit): CRTC index
937 * offset + 4 (8 bit): mask
938 * offset + 5 (8 bit): shift
939 * offset + 6 (8 bit): IO flag condition index
940 * offset + 7 (8 bit): count
941 * offset + 8 (32 bit): register
942 * offset + 12 (16 bit): frequency 1
943 * ...
944 *
945 * Starting at offset + 12 there are "count" 16 bit frequencies (10kHz).
946 * Set PLL register "register" to coefficients for frequency n,
947 * selected by reading index "CRTC index" of "CRTC port" ANDed with
948 * "mask" and shifted right by "shift".
949 *
950 * If "IO flag condition index" > 0, and condition met, double
951 * frequency before setting it.
952 */
953
954 uint16_t crtcport = ROM16(bios->data[offset + 1]);
955 uint8_t crtcindex = bios->data[offset + 3];
956 uint8_t mask = bios->data[offset + 4];
957 uint8_t shift = bios->data[offset + 5];
958 int8_t io_flag_condition_idx = bios->data[offset + 6];
959 uint8_t count = bios->data[offset + 7];
960 uint32_t reg = ROM32(bios->data[offset + 8]);
961 uint8_t config;
962 uint16_t freq;
963 int len = 12 + count * 2;
964
965 if (!iexec->execute)
966 return len;
967
968 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
969 "Shift: 0x%02X, IO Flag Condition: 0x%02X, "
970 "Count: 0x%02X, Reg: 0x%08X\n",
971 offset, crtcport, crtcindex, mask, shift,
972 io_flag_condition_idx, count, reg);
973
974 config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
975 if (config > count) {
976 NV_ERROR(bios->dev,
977 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
978 offset, config, count);
979 return len;
980 }
981
982 freq = ROM16(bios->data[offset + 12 + config * 2]);
983
984 if (io_flag_condition_idx > 0) {
985 if (io_flag_condition_met(bios, offset, io_flag_condition_idx)) {
986 BIOSLOG(bios, "0x%04X: Condition fulfilled -- "
987 "frequency doubled\n", offset);
988 freq *= 2;
989 } else
990 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- "
991 "frequency unchanged\n", offset);
992 }
993
994 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %d0kHz\n",
995 offset, reg, config, freq);
996
997 setPLL(bios, reg, freq * 10);
998
999 return len;
1000}
1001
1002static int
1003init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1004{
1005 /*
1006 * INIT_END_REPEAT opcode: 0x36 ('6')
1007 *
1008 * offset (8 bit): opcode
1009 *
1010 * Marks the end of the block for INIT_REPEAT to repeat
1011 */
1012
1013 /* no iexec->execute check by design */
1014
1015 /*
1016 * iexec->repeat flag necessary to go past INIT_END_REPEAT opcode when
1017 * we're not in repeat mode
1018 */
1019 if (iexec->repeat)
1020 return 0;
1021
1022 return 1;
1023}
1024
1025static int
1026init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1027{
1028 /*
1029 * INIT_COPY opcode: 0x37 ('7')
1030 *
1031 * offset (8 bit): opcode
1032 * offset + 1 (32 bit): register
1033 * offset + 5 (8 bit): shift
1034 * offset + 6 (8 bit): srcmask
1035 * offset + 7 (16 bit): CRTC port
1036 * offset + 9 (8 bit): CRTC index
1037 * offset + 10 (8 bit): mask
1038 *
1039 * Read index "CRTC index" on "CRTC port", AND with "mask", OR with
1040 * (REGVAL("register") >> "shift" & "srcmask") and write-back to CRTC
1041 * port
1042 */
1043
1044 uint32_t reg = ROM32(bios->data[offset + 1]);
1045 uint8_t shift = bios->data[offset + 5];
1046 uint8_t srcmask = bios->data[offset + 6];
1047 uint16_t crtcport = ROM16(bios->data[offset + 7]);
1048 uint8_t crtcindex = bios->data[offset + 9];
1049 uint8_t mask = bios->data[offset + 10];
1050 uint32_t data;
1051 uint8_t crtcdata;
1052
1053 if (!iexec->execute)
1054 return 11;
1055
1056 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, "
1057 "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n",
1058 offset, reg, shift, srcmask, crtcport, crtcindex, mask);
1059
1060 data = bios_rd32(bios, reg);
1061
1062 if (shift < 0x80)
1063 data >>= shift;
1064 else
1065 data <<= (0x100 - shift);
1066
1067 data &= srcmask;
1068
1069 crtcdata = bios_idxprt_rd(bios, crtcport, crtcindex) & mask;
1070 crtcdata |= (uint8_t)data;
1071 bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata);
1072
1073 return 11;
1074}
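/*
 * The shift byte above is effectively signed: values below 0x80 shift
 * right, values of 0x80 and up encode a left shift of (0x100 - shift),
 * so e.g. shift = 0xfe means "shift left by 2" (illustrative value).
 */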
1075
1076static int
1077init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1078{
1079 /*
1080 * INIT_NOT opcode: 0x38 ('8')
1081 *
1082 * offset (8 bit): opcode
1083 *
1084 * Invert the current execute / no-execute condition (i.e. "else")
1085 */
1086 if (iexec->execute)
1087 BIOSLOG(bios, "0x%04X: ------ Skipping following commands ------\n", offset);
1088 else
1089 BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset);
1090
1091 iexec->execute = !iexec->execute;
1092 return 1;
1093}
1094
1095static int
1096init_io_flag_condition(struct nvbios *bios, uint16_t offset,
1097 struct init_exec *iexec)
1098{
1099 /*
1100 * INIT_IO_FLAG_CONDITION opcode: 0x39 ('9')
1101 *
1102 * offset (8 bit): opcode
1103 * offset + 1 (8 bit): condition number
1104 *
1105 * Check condition "condition number" in the IO flag condition table.
1106 * If condition not met skip subsequent opcodes until condition is
1107 * inverted (INIT_NOT), or we hit INIT_RESUME
1108 */
1109
1110 uint8_t cond = bios->data[offset + 1];
1111
1112 if (!iexec->execute)
1113 return 2;
1114
1115 if (io_flag_condition_met(bios, offset, cond))
1116 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
1117 else {
1118 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
1119 iexec->execute = false;
1120 }
1121
1122 return 2;
1123}
1124
1125static int
1126init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1127{
1128 /*
1129 * INIT_DP_CONDITION opcode: 0x3A (':')
1130 *
1131 * offset (8 bit): opcode
1132 * offset + 1 (8 bit): "sub" opcode
1133 * offset + 2 (8 bit): unknown
1134 *
1135 */
1136
1137 struct dcb_entry *dcb = bios->display.output;
1138 struct drm_device *dev = bios->dev;
1139 uint8_t cond = bios->data[offset + 1];
1140 uint8_t *table, *entry;
1141
1142 BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);
1143
1144 if (!iexec->execute)
1145 return 3;
1146
1147 table = nouveau_dp_bios_data(dev, dcb, &entry);
1148 if (!table)
1149 return 3;
1150
1151 switch (cond) {
1152 case 0:
1153 entry = dcb_conn(dev, dcb->connector);
1154 if (!entry || entry[0] != DCB_CONNECTOR_eDP)
1155 iexec->execute = false;
1156 break;
1157 case 1:
1158 case 2:
1159 if ((table[0] < 0x40 && !(entry[5] & cond)) ||
1160 (table[0] == 0x40 && !(entry[4] & cond)))
1161 iexec->execute = false;
1162 break;
1163 case 5:
1164 {
1165 struct nouveau_i2c_chan *auxch;
1166 int ret;
1167
1168 auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index);
1169 if (!auxch) {
1170 NV_ERROR(dev, "0x%04X: couldn't get auxch\n", offset);
1171 return 3;
1172 }
1173
1174 ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1);
1175 if (ret) {
1176 NV_ERROR(dev, "0x%04X: auxch rd fail: %d\n", offset, ret);
1177 return 3;
1178 }
1179
1180 if (!(cond & 1))
1181 iexec->execute = false;
1182 }
1183 break;
1184 default:
1185 NV_WARN(dev, "0x%04X: unknown INIT_3A op: %d\n", offset, cond);
1186 break;
1187 }
1188
1189 if (iexec->execute)
1190 BIOSLOG(bios, "0x%04X: continuing to execute\n", offset);
1191 else
1192 BIOSLOG(bios, "0x%04X: skipping following commands\n", offset);
1193
1194 return 3;
1195}
1196
1197static int
1198init_op_3b(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1199{
1200 /*
1201 * INIT_3B opcode: 0x3B (';')
1202 *
1203 * offset (8 bit): opcode
1204 * offset + 1 (8 bit): crtc index
1205 *
1206 */
1207
1208 uint8_t or = ffs(bios->display.output->or) - 1;
1209 uint8_t index = bios->data[offset + 1];
1210 uint8_t data;
1211
1212 if (!iexec->execute)
1213 return 2;
1214
1215 data = bios_idxprt_rd(bios, 0x3d4, index);
1216 bios_idxprt_wr(bios, 0x3d4, index, data & ~(1 << or));
1217 return 2;
1218}
1219
1220static int
1221init_op_3c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1222{
1223 /*
1224 * INIT_3C opcode: 0x3C ('<')
1225 *
1226 * offset (8 bit): opcode
1227 * offset + 1 (8 bit): crtc index
1228 *
1229 */
1230
1231 uint8_t or = ffs(bios->display.output->or) - 1;
1232 uint8_t index = bios->data[offset + 1];
1233 uint8_t data;
1234
1235 if (!iexec->execute)
1236 return 2;
1237
1238 data = bios_idxprt_rd(bios, 0x3d4, index);
1239 bios_idxprt_wr(bios, 0x3d4, index, data | (1 << or));
1240 return 2;
1241}
1242
1243static int
1244init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
1245 struct init_exec *iexec)
1246{
1247 /*
1248 * INIT_INDEX_ADDRESS_LATCHED opcode: 0x49 ('I')
1249 *
1250 * offset (8 bit): opcode
1251 * offset + 1 (32 bit): control register
1252 * offset + 5 (32 bit): data register
1253 * offset + 9 (32 bit): mask
1254 * offset + 13 (32 bit): data
1255 * offset + 17 (8 bit): count
1256 * offset + 18 (8 bit): address 1
1257 * offset + 19 (8 bit): data 1
1258 * ...
1259 *
1260 * For each of "count" address and data pairs, write "data n" to
1261 * "data register", read the current value of "control register",
1262 * and write it back once ANDed with "mask", ORed with "data",
1263 * and ORed with "address n"
1264 */
1265
1266 uint32_t controlreg = ROM32(bios->data[offset + 1]);
1267 uint32_t datareg = ROM32(bios->data[offset + 5]);
1268 uint32_t mask = ROM32(bios->data[offset + 9]);
1269 uint32_t data = ROM32(bios->data[offset + 13]);
1270 uint8_t count = bios->data[offset + 17];
1271 int len = 18 + count * 2;
1272 uint32_t value;
1273 int i;
1274
1275 if (!iexec->execute)
1276 return len;
1277
1278 BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, "
1279 "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n",
1280 offset, controlreg, datareg, mask, data, count);
1281
1282 for (i = 0; i < count; i++) {
1283 uint8_t instaddress = bios->data[offset + 18 + i * 2];
1284 uint8_t instdata = bios->data[offset + 19 + i * 2];
1285
1286 BIOSLOG(bios, "0x%04X: Address: 0x%02X, Data: 0x%02X\n",
1287 offset, instaddress, instdata);
1288
1289 bios_wr32(bios, datareg, instdata);
1290 value = bios_rd32(bios, controlreg) & mask;
1291 value |= data;
1292 value |= instaddress;
1293 bios_wr32(bios, controlreg, value);
1294 }
1295
1296 return len;
1297}
1298
1299static int
1300init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
1301 struct init_exec *iexec)
1302{
1303 /*
1304 * INIT_IO_RESTRICT_PLL2 opcode: 0x4A ('J')
1305 *
1306 * offset (8 bit): opcode
1307 * offset + 1 (16 bit): CRTC port
1308 * offset + 3 (8 bit): CRTC index
1309 * offset + 4 (8 bit): mask
1310 * offset + 5 (8 bit): shift
1311 * offset + 6 (8 bit): count
1312 * offset + 7 (32 bit): register
1313 * offset + 11 (32 bit): frequency 1
1314 * ...
1315 *
1316 * Starting at offset + 11 there are "count" 32 bit frequencies (kHz).
1317 * Set PLL register "register" to coefficients for frequency n,
1318 * selected by reading index "CRTC index" of "CRTC port" ANDed with
1319 * "mask" and shifted right by "shift".
1320 */
1321
1322 uint16_t crtcport = ROM16(bios->data[offset + 1]);
1323 uint8_t crtcindex = bios->data[offset + 3];
1324 uint8_t mask = bios->data[offset + 4];
1325 uint8_t shift = bios->data[offset + 5];
1326 uint8_t count = bios->data[offset + 6];
1327 uint32_t reg = ROM32(bios->data[offset + 7]);
1328 int len = 11 + count * 4;
1329 uint8_t config;
1330 uint32_t freq;
1331
1332 if (!iexec->execute)
1333 return len;
1334
1335 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
1336 "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
1337 offset, crtcport, crtcindex, mask, shift, count, reg);
1338
1339 if (!reg)
1340 return len;
1341
1342 config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
1343 if (config > count) {
1344 NV_ERROR(bios->dev,
1345 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
1346 offset, config, count);
1347 return len;
1348 }
1349
1350 freq = ROM32(bios->data[offset + 11 + config * 4]);
1351
1352 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %dkHz\n",
1353 offset, reg, config, freq);
1354
1355 setPLL(bios, reg, freq);
1356
1357 return len;
1358}
1359
1360static int
1361init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1362{
1363 /*
1364 * INIT_PLL2 opcode: 0x4B ('K')
1365 *
1366 * offset (8 bit): opcode
1367 * offset + 1 (32 bit): register
1368 * offset + 5 (32 bit): freq
1369 *
1370 * Set PLL register "register" to coefficients for frequency "freq"
1371 */
1372
1373 uint32_t reg = ROM32(bios->data[offset + 1]);
1374 uint32_t freq = ROM32(bios->data[offset + 5]);
1375
1376 if (!iexec->execute)
1377 return 9;
1378
1379 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %dkHz\n",
1380 offset, reg, freq);
1381
1382 setPLL(bios, reg, freq);
1383 return 9;
1384}
1385
1386static int
1387init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1388{
1389 /*
1390 * INIT_I2C_BYTE opcode: 0x4C ('L')
1391 *
1392 * offset (8 bit): opcode
1393 * offset + 1 (8 bit): DCB I2C table entry index
1394 * offset + 2 (8 bit): I2C slave address
1395 * offset + 3 (8 bit): count
1396 * offset + 4 (8 bit): I2C register 1
1397 * offset + 5 (8 bit): mask 1
1398 * offset + 6 (8 bit): data 1
1399 * ...
1400 *
1401 * For each of "count" registers given by "I2C register n" on the device
1402 * addressed by "I2C slave address" on the I2C bus given by
1403 * "DCB I2C table entry index", read the register, AND the result with
1404 * "mask n" and OR it with "data n" before writing it back to the device
1405 */
1406
1407 struct drm_device *dev = bios->dev;
1408 uint8_t i2c_index = bios->data[offset + 1];
1409 uint8_t i2c_address = bios->data[offset + 2] >> 1;
1410 uint8_t count = bios->data[offset + 3];
1411 struct nouveau_i2c_chan *chan;
1412 int len = 4 + count * 3;
1413 int ret, i;
1414
1415 if (!iexec->execute)
1416 return len;
1417
1418 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
1419 "Count: 0x%02X\n",
1420 offset, i2c_index, i2c_address, count);
1421
1422 chan = init_i2c_device_find(dev, i2c_index);
1423 if (!chan) {
1424 NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
1425 return len;
1426 }
1427
1428 for (i = 0; i < count; i++) {
1429 uint8_t reg = bios->data[offset + 4 + i * 3];
1430 uint8_t mask = bios->data[offset + 5 + i * 3];
1431 uint8_t data = bios->data[offset + 6 + i * 3];
1432 union i2c_smbus_data val;
1433
1434 ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
1435 I2C_SMBUS_READ, reg,
1436 I2C_SMBUS_BYTE_DATA, &val);
1437 if (ret < 0) {
1438 NV_ERROR(dev, "0x%04X: i2c rd fail: %d\n", offset, ret);
1439 return len;
1440 }
1441
1442 BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
1443 "Mask: 0x%02X, Data: 0x%02X\n",
1444 offset, reg, val.byte, mask, data);
1445
1446 if (!bios->execute)
1447 continue;
1448
1449 val.byte &= mask;
1450 val.byte |= data;
1451 ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
1452 I2C_SMBUS_WRITE, reg,
1453 I2C_SMBUS_BYTE_DATA, &val);
1454 if (ret < 0) {
1455 NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
1456 return len;
1457 }
1458 }
1459
1460 return len;
1461}
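/*
 * The ">> 1" on the address byte above converts the 8-bit address
 * stored in the ROM (which includes the R/W bit) to the 7-bit form the
 * kernel I2C core expects -- e.g. a ROM byte of 0xa0 becomes address
 * 0x50 (example value, typical of DDC/EEPROM devices).
 */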
1462
1463static int
1464init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1465{
1466 /*
1467 * INIT_ZM_I2C_BYTE opcode: 0x4D ('M')
1468 *
1469 * offset (8 bit): opcode
1470 * offset + 1 (8 bit): DCB I2C table entry index
1471 * offset + 2 (8 bit): I2C slave address
1472 * offset + 3 (8 bit): count
1473 * offset + 4 (8 bit): I2C register 1
1474 * offset + 5 (8 bit): data 1
1475 * ...
1476 *
1477 * For each of "count" registers given by "I2C register n" on the device
1478 * addressed by "I2C slave address" on the I2C bus given by
1479 * "DCB I2C table entry index", set the register to "data n"
1480 */
1481
1482 struct drm_device *dev = bios->dev;
1483 uint8_t i2c_index = bios->data[offset + 1];
1484 uint8_t i2c_address = bios->data[offset + 2] >> 1;
1485 uint8_t count = bios->data[offset + 3];
1486 struct nouveau_i2c_chan *chan;
1487 int len = 4 + count * 2;
1488 int ret, i;
1489
1490 if (!iexec->execute)
1491 return len;
1492
1493 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
1494 "Count: 0x%02X\n",
1495 offset, i2c_index, i2c_address, count);
1496
1497 chan = init_i2c_device_find(dev, i2c_index);
1498 if (!chan) {
1499 NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
1500 return len;
1501 }
1502
1503 for (i = 0; i < count; i++) {
1504 uint8_t reg = bios->data[offset + 4 + i * 2];
1505 union i2c_smbus_data val;
1506
1507 val.byte = bios->data[offset + 5 + i * 2];
1508
1509 BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
1510 offset, reg, val.byte);
1511
1512 if (!bios->execute)
1513 continue;
1514
1515 ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
1516 I2C_SMBUS_WRITE, reg,
1517 I2C_SMBUS_BYTE_DATA, &val);
1518 if (ret < 0) {
1519 NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
1520 return len;
1521 }
1522 }
1523
1524 return len;
1525}
1526
1527static int
1528init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1529{
1530 /*
1531 * INIT_ZM_I2C opcode: 0x4E ('N')
1532 *
1533 * offset (8 bit): opcode
1534 * offset + 1 (8 bit): DCB I2C table entry index
1535 * offset + 2 (8 bit): I2C slave address
1536 * offset + 3 (8 bit): count
1537 * offset + 4 (8 bit): data 1
1538 * ...
1539 *
1540 * Send "count" bytes ("data n") to the device addressed by "I2C slave
1541 * address" on the I2C bus given by "DCB I2C table entry index"
1542 */
1543
1544 struct drm_device *dev = bios->dev;
1545 uint8_t i2c_index = bios->data[offset + 1];
1546 uint8_t i2c_address = bios->data[offset + 2] >> 1;
1547 uint8_t count = bios->data[offset + 3];
1548 int len = 4 + count;
1549 struct nouveau_i2c_chan *chan;
1550 struct i2c_msg msg;
1551 uint8_t data[256];
1552 int ret, i;
1553
1554 if (!iexec->execute)
1555 return len;
1556
1557 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
1558 "Count: 0x%02X\n",
1559 offset, i2c_index, i2c_address, count);
1560
1561 chan = init_i2c_device_find(dev, i2c_index);
1562 if (!chan) {
1563 NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
1564 return len;
1565 }
1566
1567 for (i = 0; i < count; i++) {
1568 data[i] = bios->data[offset + 4 + i];
1569
1570 BIOSLOG(bios, "0x%04X: Data: 0x%02X\n", offset, data[i]);
1571 }
1572
1573 if (bios->execute) {
1574 msg.addr = i2c_address;
1575 msg.flags = 0;
1576 msg.len = count;
1577 msg.buf = data;
1578 ret = i2c_transfer(&chan->adapter, &msg, 1);
1579 if (ret != 1) {
1580 NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
1581 return len;
1582 }
1583 }
1584
1585 return len;
1586}
1587
1588static int
1589init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1590{
1591 /*
1592 * INIT_TMDS opcode: 0x4F ('O') (non-canon name)
1593 *
1594 * offset (8 bit): opcode
1595 * offset + 1 (8 bit): magic lookup value
1596 * offset + 2 (8 bit): TMDS address
1597 * offset + 3 (8 bit): mask
1598 * offset + 4 (8 bit): data
1599 *
1600 * Read the data reg for TMDS address "TMDS address", AND it with mask
1601 * and OR it with data, then write it back
1602 * "magic lookup value" determines which TMDS base address register is
1603 * used -- see get_tmds_index_reg()
1604 */
1605
1606 struct drm_device *dev = bios->dev;
1607 uint8_t mlv = bios->data[offset + 1];
1608 uint32_t tmdsaddr = bios->data[offset + 2];
1609 uint8_t mask = bios->data[offset + 3];
1610 uint8_t data = bios->data[offset + 4];
1611 uint32_t reg, value;
1612
1613 if (!iexec->execute)
1614 return 5;
1615
1616 BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, "
1617 "Mask: 0x%02X, Data: 0x%02X\n",
1618 offset, mlv, tmdsaddr, mask, data);
1619
1620 reg = get_tmds_index_reg(bios->dev, mlv);
1621 if (!reg) {
1622 NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset);
1623 return 5;
1624 }
1625
1626 bios_wr32(bios, reg,
1627 tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
1628 value = (bios_rd32(bios, reg + 4) & mask) | data;
1629 bios_wr32(bios, reg + 4, value);
1630 bios_wr32(bios, reg, tmdsaddr);
1631
1632 return 5;
1633}
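/*
 * Write sequence used above: the TMDS address is first written with
 * NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE set, the data register at
 * reg + 4 is then read-modify-written, and the bare address is written
 * again -- presumably this final write is what latches the new data
 * into the selected TMDS register.
 */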
1634
1635static int
1636init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
1637 struct init_exec *iexec)
1638{
1639 /*
1640 * INIT_ZM_TMDS_GROUP opcode: 0x50 ('P') (non-canon name)
1641 *
1642 * offset (8 bit): opcode
1643 * offset + 1 (8 bit): magic lookup value
1644 * offset + 2 (8 bit): count
1645 * offset + 3 (8 bit): addr 1
1646 * offset + 4 (8 bit): data 1
1647 * ...
1648 *
1649 * For each of "count" TMDS address and data pairs write "data n" to
1650 * "addr n". "magic lookup value" determines which TMDS base address
1651 * register is used -- see get_tmds_index_reg()
1652 */
1653
1654 struct drm_device *dev = bios->dev;
1655 uint8_t mlv = bios->data[offset + 1];
1656 uint8_t count = bios->data[offset + 2];
1657 int len = 3 + count * 2;
1658 uint32_t reg;
1659 int i;
1660
1661 if (!iexec->execute)
1662 return len;
1663
1664 BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n",
1665 offset, mlv, count);
1666
1667 reg = get_tmds_index_reg(bios->dev, mlv);
1668 if (!reg) {
1669 NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset);
1670 return len;
1671 }
1672
1673 for (i = 0; i < count; i++) {
1674 uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
1675 uint8_t tmdsdata = bios->data[offset + 4 + i * 2];
1676
1677 bios_wr32(bios, reg + 4, tmdsdata);
1678 bios_wr32(bios, reg, tmdsaddr);
1679 }
1680
1681 return len;
1682}
1683
1684static int
1685init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
1686 struct init_exec *iexec)
1687{
1688 /*
1689 * INIT_CR_INDEX_ADDRESS_LATCHED opcode: 0x51 ('Q')
1690 *
1691 * offset (8 bit): opcode
1692 * offset + 1 (8 bit): CRTC index1
1693 * offset + 2 (8 bit): CRTC index2
1694 * offset + 3 (8 bit): baseaddr
1695 * offset + 4 (8 bit): count
1696 * offset + 5 (8 bit): data 1
1697 * ...
1698 *
1699 * For each of "count" address and data pairs, write "baseaddr + n" to
1700 * "CRTC index1" and "data n" to "CRTC index2"
1701 * Once complete, restore initial value read from "CRTC index1"
1702 */
1703 uint8_t crtcindex1 = bios->data[offset + 1];
1704 uint8_t crtcindex2 = bios->data[offset + 2];
1705 uint8_t baseaddr = bios->data[offset + 3];
1706 uint8_t count = bios->data[offset + 4];
1707 int len = 5 + count;
1708 uint8_t oldaddr, data;
1709 int i;
1710
1711 if (!iexec->execute)
1712 return len;
1713
1714 BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, "
1715 "BaseAddr: 0x%02X, Count: 0x%02X\n",
1716 offset, crtcindex1, crtcindex2, baseaddr, count);
1717
1718 oldaddr = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex1);
1719
1720 for (i = 0; i < count; i++) {
1721 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1,
1722 baseaddr + i);
1723 data = bios->data[offset + 5 + i];
1724 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex2, data);
1725 }
1726
1727 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr);
1728
1729 return len;
1730}
1731
1732static int
1733init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1734{
1735 /*
1736 * INIT_CR opcode: 0x52 ('R')
1737 *
1738 * offset (8 bit): opcode
1739 * offset + 1 (8 bit): CRTC index
1740 * offset + 2 (8 bit): mask
1741 * offset + 3 (8 bit): data
1742 *
1743 * Assign the value at "CRTC index" ANDed with "mask" and ORed with
1744 * data back to "CRTC index"
1745 */
1746
1747 uint8_t crtcindex = bios->data[offset + 1];
1748 uint8_t mask = bios->data[offset + 2];
1749 uint8_t data = bios->data[offset + 3];
1750 uint8_t value;
1751
1752 if (!iexec->execute)
1753 return 4;
1754
1755 BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n",
1756 offset, crtcindex, mask, data);
1757
1758 value = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex) & mask;
1759 value |= data;
1760 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value);
1761
1762 return 4;
1763}
1764
1765static int
1766init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1767{
1768 /*
1769 * INIT_ZM_CR opcode: 0x53 ('S')
1770 *
1771 * offset (8 bit): opcode
1772 * offset + 1 (8 bit): CRTC index
1773 * offset + 2 (8 bit): value
1774 *
1775 * Assign "value" to CRTC register with index "CRTC index".
1776 */
1777
1778 uint8_t crtcindex = bios->data[offset + 1];
1779 uint8_t data = bios->data[offset + 2];
1780
1781 if (!iexec->execute)
1782 return 3;
1783
1784 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data);
1785
1786 return 3;
1787}
1788
1789static int
1790init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1791{
1792 /*
1793 * INIT_ZM_CR_GROUP opcode: 0x54 ('T')
1794 *
1795 * offset (8 bit): opcode
1796 * offset + 1 (8 bit): count
1797 * offset + 2 (8 bit): CRTC index 1
1798 * offset + 3 (8 bit): value 1
1799 * ...
1800 *
1801 * For "count", assign "value n" to CRTC register with index
1802 * "CRTC index n".
1803 */
1804
1805 uint8_t count = bios->data[offset + 1];
1806 int len = 2 + count * 2;
1807 int i;
1808
1809 if (!iexec->execute)
1810 return len;
1811
1812 for (i = 0; i < count; i++)
1813 init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec);
1814
1815 return len;
1816}
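/*
 * The "offset + 2 + 2 * i - 1" above deliberately points one byte
 * before pair i, so that init_zm_cr()'s reads at offset + 1 and
 * offset + 2 land on "CRTC index n" and "value n" respectively.
 */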
1817
1818static int
1819init_condition_time(struct nvbios *bios, uint16_t offset,
1820 struct init_exec *iexec)
1821{
1822 /*
1823 * INIT_CONDITION_TIME opcode: 0x56 ('V')
1824 *
1825 * offset (8 bit): opcode
1826 * offset + 1 (8 bit): condition number
1827 * offset + 2 (8 bit): retries / 50
1828 *
1829 * Check condition "condition number" in the condition table.
1830 * Bios code then sleeps for 2ms if the condition is not met, and
1831 * repeats up to "retries" times, but on one C51 this has proved
1832 * insufficient. In mmiotraces the driver sleeps for 20ms, so we do
1833 * this, and bail after "retries" times, or 2s, whichever is less.
1834 * If still not met after retries, clear execution flag for this table.
1835 */
1836
1837 uint8_t cond = bios->data[offset + 1];
1838 uint16_t retries = bios->data[offset + 2] * 50;
1839 unsigned cnt;
1840
1841 if (!iexec->execute)
1842 return 3;
1843
1844 if (retries > 100)
1845 retries = 100;
1846
1847 BIOSLOG(bios, "0x%04X: Condition: 0x%02X, Retries: 0x%02X\n",
1848 offset, cond, retries);
1849
1850 if (!bios->execute) /* avoid 2s delays when "faking" execution */
1851 retries = 1;
1852
1853 for (cnt = 0; cnt < retries; cnt++) {
1854 if (bios_condition_met(bios, offset, cond)) {
1855 BIOSLOG(bios, "0x%04X: Condition met, continuing\n",
1856 offset);
1857 break;
1858 } else {
1859 BIOSLOG(bios, "0x%04X: "
1860 "Condition not met, sleeping for 20ms\n",
1861 offset);
1862 mdelay(20);
1863 }
1864 }
1865
1866 if (!bios_condition_met(bios, offset, cond)) {
1867 NV_WARN(bios->dev,
1868 "0x%04X: Condition still not met after %dms, "
1869 "skipping following opcodes\n", offset, 20 * retries);
1870 iexec->execute = false;
1871 }
1872
1873 return 3;
1874}
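/*
 * Timing example for the clamping above: a ROM byte of 0x02 gives
 * 2 * 50 = 100 retries, the maximum, which at 20ms per attempt is the
 * 2 second ceiling mentioned in the comment.
 */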
1875
1876static int
1877init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1878{
1879 /*
1880 * INIT_LTIME opcode: 0x57 ('W')
1881 *
1882 * offset (8 bit): opcode
1883 * offset + 1 (16 bit): time
1884 *
1885 * Sleep for "time" milliseconds.
1886 */
1887
1888 unsigned time = ROM16(bios->data[offset + 1]);
1889
1890 if (!iexec->execute)
1891 return 3;
1892
1893 BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
1894 offset, time);
1895
1896 mdelay(time);
1897
1898 return 3;
1899}
1900
1901static int
1902init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
1903 struct init_exec *iexec)
1904{
1905 /*
1906 * INIT_ZM_REG_SEQUENCE opcode: 0x58 ('X')
1907 *
1908 * offset (8 bit): opcode
1909 * offset + 1 (32 bit): base register
1910 * offset + 5 (8 bit): count
1911 * offset + 6 (32 bit): value 1
1912 * ...
1913 *
1914 * Starting at offset + 6 there are "count" 32 bit values.
1915 * For "count" iterations set "base register" + 4 * current_iteration
1916 * to "value current_iteration"
1917 */
1918
1919 uint32_t basereg = ROM32(bios->data[offset + 1]);
1920 uint32_t count = bios->data[offset + 5];
1921 int len = 6 + count * 4;
1922 int i;
1923
1924 if (!iexec->execute)
1925 return len;
1926
1927 BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n",
1928 offset, basereg, count);
1929
1930 for (i = 0; i < count; i++) {
1931 uint32_t reg = basereg + i * 4;
1932 uint32_t data = ROM32(bios->data[offset + 6 + i * 4]);
1933
1934 bios_wr32(bios, reg, data);
1935 }
1936
1937 return len;
1938}
1939
1940static int
1941init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1942{
1943 /*
1944 * INIT_SUB_DIRECT opcode: 0x5B ('[')
1945 *
1946 * offset (8 bit): opcode
1947 * offset + 1 (16 bit): subroutine offset (in bios)
1948 *
1949 * Calls a subroutine that will execute commands until INIT_DONE
1950 * is found.
1951 */
1952
1953 uint16_t sub_offset = ROM16(bios->data[offset + 1]);
1954
1955 if (!iexec->execute)
1956 return 3;
1957
1958 BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n",
1959 offset, sub_offset);
1960
1961 parse_init_table(bios, sub_offset, iexec);
1962
1963 BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset);
1964
1965 return 3;
1966}
1967
1968static int
1969init_jump(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1970{
1971 /*
1972 * INIT_JUMP opcode: 0x5C ('\')
1973 *
1974 * offset (8 bit): opcode
1975 * offset + 1 (16 bit): offset (in bios)
1976 *
1977 * Continue execution of init table from 'offset'
1978 */
1979
1980 uint16_t jmp_offset = ROM16(bios->data[offset + 1]);
1981
1982 if (!iexec->execute)
1983 return 3;
1984
1985 BIOSLOG(bios, "0x%04X: Jump to 0x%04X\n", offset, jmp_offset);
1986 return jmp_offset - offset;
1987}
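/*
 * The return value above is a delta, not a length: parse_init_table()
 * advances by whatever the handler returns, so returning
 * jmp_offset - offset lands execution exactly on the jump target.
 */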
1988
1989static int
1990init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1991{
1992 /*
1993 * INIT_I2C_IF opcode: 0x5E ('^')
1994 *
1995 * offset (8 bit): opcode
1996 * offset + 1 (8 bit): DCB I2C table entry index
1997 * offset + 2 (8 bit): I2C slave address
1998 * offset + 3 (8 bit): I2C register
1999 * offset + 4 (8 bit): mask
2000 * offset + 5 (8 bit): data
2001 *
2002 * Read the register given by "I2C register" on the device addressed
2003 * by "I2C slave address" on the I2C bus given by "DCB I2C table
2004 * entry index". Compare the result AND "mask" to "data".
2005 * If they're not equal, skip subsequent opcodes until condition is
2006 * inverted (INIT_NOT), or we hit INIT_RESUME
2007 */
2008
2009 uint8_t i2c_index = bios->data[offset + 1];
2010 uint8_t i2c_address = bios->data[offset + 2] >> 1;
2011 uint8_t reg = bios->data[offset + 3];
2012 uint8_t mask = bios->data[offset + 4];
2013 uint8_t data = bios->data[offset + 5];
2014 struct nouveau_i2c_chan *chan;
2015 union i2c_smbus_data val;
2016 int ret;
2017
2018 /* no execute check by design */
2019
2020 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X\n",
2021 offset, i2c_index, i2c_address);
2022
2023 chan = init_i2c_device_find(bios->dev, i2c_index);
2024 if (!chan)
2025 return -ENODEV;
2026
2027 ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
2028 I2C_SMBUS_READ, reg,
2029 I2C_SMBUS_BYTE_DATA, &val);
2030 if (ret < 0) {
2031 BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: [no device], "
2032 "Mask: 0x%02X, Data: 0x%02X\n",
2033 offset, reg, mask, data);
2034 iexec->execute = 0;
2035 return 6;
2036 }
2037
2038 BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
2039 "Mask: 0x%02X, Data: 0x%02X\n",
2040 offset, reg, val.byte, mask, data);
2041
2042 iexec->execute = ((val.byte & mask) == data);
2043
2044 return 6;
2045}
2046
2047static int
2048init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2049{
2050 /*
2051 * INIT_COPY_NV_REG opcode: 0x5F ('_')
2052 *
2053 * offset (8 bit): opcode
2054 * offset + 1 (32 bit): src reg
2055 * offset + 5 (8 bit): shift
2056 * offset + 6 (32 bit): src mask
2057 * offset + 10 (32 bit): xor
2058 * offset + 14 (32 bit): dst reg
2059 * offset + 18 (32 bit): dst mask
2060 *
2061 * Shift REGVAL("src reg") right by (signed) "shift", AND result with
2062 * "src mask", then XOR with "xor". Write this OR'd with
2063 * (REGVAL("dst reg") AND'd with "dst mask") to "dst reg"
2064 */
2065
2066 uint32_t srcreg = ROM32(bios->data[offset + 1]);
2067 uint8_t shift = bios->data[offset + 5];
2068 uint32_t srcmask = ROM32(bios->data[offset + 6]);
2069 uint32_t xor = ROM32(bios->data[offset + 10]);
2070 uint32_t dstreg = ROM32(bios->data[offset + 14]);
2071 uint32_t dstmask = ROM32(bios->data[offset + 18]);
2072 uint32_t srcvalue, dstvalue;
2073
2074 if (!iexec->execute)
2075 return 22;
2076
2077 BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, "
2078 "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n",
2079 offset, srcreg, shift, srcmask, xor, dstreg, dstmask);
2080
2081 srcvalue = bios_rd32(bios, srcreg);
2082
2083 if (shift < 0x80)
2084 srcvalue >>= shift;
2085 else
2086 srcvalue <<= (0x100 - shift);
2087
2088 srcvalue = (srcvalue & srcmask) ^ xor;
2089
2090 dstvalue = bios_rd32(bios, dstreg) & dstmask;
2091
2092 bios_wr32(bios, dstreg, dstvalue | srcvalue);
2093
2094 return 22;
2095}
2096
2097static int
2098init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2099{
2100 /*
2101 * INIT_ZM_INDEX_IO opcode: 0x62 ('b')
2102 *
2103 * offset (8 bit): opcode
2104 * offset + 1 (16 bit): CRTC port
2105 * offset + 3 (8 bit): CRTC index
2106 * offset + 4 (8 bit): data
2107 *
2108 * Write "data" to index "CRTC index" of "CRTC port"
2109 */
2110 uint16_t crtcport = ROM16(bios->data[offset + 1]);
2111 uint8_t crtcindex = bios->data[offset + 3];
2112 uint8_t data = bios->data[offset + 4];
2113
2114 if (!iexec->execute)
2115 return 5;
2116
2117 bios_idxprt_wr(bios, crtcport, crtcindex, data);
2118
2119 return 5;
2120}
2121
2122static inline void
2123bios_md32(struct nvbios *bios, uint32_t reg,
2124 uint32_t mask, uint32_t val)
2125{
2126 bios_wr32(bios, reg, (bios_rd32(bios, reg) & ~mask) | val);
2127}
2128
2129static uint32_t
2130peek_fb(struct drm_device *dev, struct io_mapping *fb,
2131 uint32_t off)
2132{
2133 uint32_t val = 0;
2134
2135 if (off < pci_resource_len(dev->pdev, 1)) {
2136 uint8_t __iomem *p =
2137 io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
2138
2139 val = ioread32(p + (off & ~PAGE_MASK));
2140
2141 io_mapping_unmap_atomic(p);
2142 }
2143
2144 return val;
2145}
2146
2147static void
2148poke_fb(struct drm_device *dev, struct io_mapping *fb,
2149 uint32_t off, uint32_t val)
2150{
2151 if (off < pci_resource_len(dev->pdev, 1)) {
2152 uint8_t __iomem *p =
2153 io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
2154
2155 iowrite32(val, p + (off & ~PAGE_MASK));
2156 wmb();
2157
2158 io_mapping_unmap_atomic(p);
2159 }
2160}
2161
2162static inline bool
2163read_back_fb(struct drm_device *dev, struct io_mapping *fb,
2164 uint32_t off, uint32_t val)
2165{
2166 poke_fb(dev, fb, off, val);
2167 return val == peek_fb(dev, fb, off);
2168}
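/*
 * peek_fb()/poke_fb() above bound every access by the BAR1 aperture
 * size, so the RAM sizing code below can probe candidate offsets
 * freely; read_back_fb() is the basic "does this cell really exist"
 * test used to detect mirroring and missing ICs.
 */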
2169
2170static int
2171nv04_init_compute_mem(struct nvbios *bios)
2172{
2173 struct drm_device *dev = bios->dev;
2174 uint32_t patt = 0xdeadbeef;
2175 struct io_mapping *fb;
2176 int i;
2177
2178 /* Map the framebuffer aperture */
2179 fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
2180 pci_resource_len(dev->pdev, 1));
2181 if (!fb)
2182 return -ENOMEM;
2183
2184 /* Sequencer and refresh off */
2185 NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) | 0x20);
2186 bios_md32(bios, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
2187
2188 bios_md32(bios, NV04_PFB_BOOT_0, ~0,
2189 NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
2190 NV04_PFB_BOOT_0_RAM_WIDTH_128 |
2191 NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);
2192
2193 for (i = 0; i < 4; i++)
2194 poke_fb(dev, fb, 4 * i, patt);
2195
2196 poke_fb(dev, fb, 0x400000, patt + 1);
2197
2198 if (peek_fb(dev, fb, 0) == patt + 1) {
2199 bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
2200 NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
2201 bios_md32(bios, NV04_PFB_DEBUG_0,
2202 NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
2203
2204 for (i = 0; i < 4; i++)
2205 poke_fb(dev, fb, 4 * i, patt);
2206
2207 if ((peek_fb(dev, fb, 0xc) & 0xffff) != (patt & 0xffff))
2208 bios_md32(bios, NV04_PFB_BOOT_0,
2209 NV04_PFB_BOOT_0_RAM_WIDTH_128 |
2210 NV04_PFB_BOOT_0_RAM_AMOUNT,
2211 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
2212
2213 } else if ((peek_fb(dev, fb, 0xc) & 0xffff0000) !=
2214 (patt & 0xffff0000)) {
2215 bios_md32(bios, NV04_PFB_BOOT_0,
2216 NV04_PFB_BOOT_0_RAM_WIDTH_128 |
2217 NV04_PFB_BOOT_0_RAM_AMOUNT,
2218 NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
2219
2220 } else if (peek_fb(dev, fb, 0) != patt) {
2221 if (read_back_fb(dev, fb, 0x800000, patt))
2222 bios_md32(bios, NV04_PFB_BOOT_0,
2223 NV04_PFB_BOOT_0_RAM_AMOUNT,
2224 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
2225 else
2226 bios_md32(bios, NV04_PFB_BOOT_0,
2227 NV04_PFB_BOOT_0_RAM_AMOUNT,
2228 NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
2229
2230 bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
2231 NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
2232
2233 } else if (!read_back_fb(dev, fb, 0x800000, patt)) {
2234 bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
2235 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
2236
2237 }
2238
2239 /* Refresh on, sequencer on */
2240 bios_md32(bios, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
2241 NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) & ~0x20);
2242
2243 io_mapping_free(fb);
2244 return 0;
2245}
2246
2247static const uint8_t *
2248nv05_memory_config(struct nvbios *bios)
2249{
2250 /* Defaults for BIOSes lacking a memory config table */
2251 static const uint8_t default_config_tab[][2] = {
2252 { 0x24, 0x00 },
2253 { 0x28, 0x00 },
2254 { 0x24, 0x01 },
2255 { 0x1f, 0x00 },
2256 { 0x0f, 0x00 },
2257 { 0x17, 0x00 },
2258 { 0x06, 0x00 },
2259 { 0x00, 0x00 }
2260 };
2261 int i = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) &
2262 NV_PEXTDEV_BOOT_0_RAMCFG) >> 2;
2263
2264 if (bios->legacy.mem_init_tbl_ptr)
2265 return &bios->data[bios->legacy.mem_init_tbl_ptr + 2 * i];
2266 else
2267 return default_config_tab[i];
2268}
2269
2270static int
2271nv05_init_compute_mem(struct nvbios *bios)
2272{
2273 struct drm_device *dev = bios->dev;
2274 const uint8_t *ramcfg = nv05_memory_config(bios);
2275 uint32_t patt = 0xdeadbeef;
2276 struct io_mapping *fb;
2277 int i, v;
2278
2279 /* Map the framebuffer aperture */
2280 fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
2281 pci_resource_len(dev->pdev, 1));
2282 if (!fb)
2283 return -ENOMEM;
2284
2285 /* Sequencer off */
2286 NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) | 0x20);
2287
2288 if (bios_rd32(bios, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
2289 goto out;
2290
2291 bios_md32(bios, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
2292
2293 /* If present load the hardcoded scrambling table */
2294 if (bios->legacy.mem_init_tbl_ptr) {
2295 uint32_t *scramble_tab = (uint32_t *)&bios->data[
2296 bios->legacy.mem_init_tbl_ptr + 0x10];
2297
2298 for (i = 0; i < 8; i++)
2299 bios_wr32(bios, NV04_PFB_SCRAMBLE(i),
2300 ROM32(scramble_tab[i]));
2301 }
2302
2303 /* Set memory type/width/length defaults depending on the straps */
2304 bios_md32(bios, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
2305
2306 if (ramcfg[1] & 0x80)
2307 bios_md32(bios, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
2308
2309 bios_md32(bios, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
2310 bios_md32(bios, NV04_PFB_CFG1, 0, 1);
2311
2312 /* Probe memory bus width */
2313 for (i = 0; i < 4; i++)
2314 poke_fb(dev, fb, 4 * i, patt);
2315
2316 if (peek_fb(dev, fb, 0xc) != patt)
2317 bios_md32(bios, NV04_PFB_BOOT_0,
2318 NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);
2319
2320 /* Probe memory length */
2321 v = bios_rd32(bios, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
2322
2323 if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
2324 (!read_back_fb(dev, fb, 0x1000000, ++patt) ||
2325 !read_back_fb(dev, fb, 0, ++patt)))
2326 bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
2327 NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);
2328
2329 if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
2330 !read_back_fb(dev, fb, 0x800000, ++patt))
2331 bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
2332 NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
2333
2334 if (!read_back_fb(dev, fb, 0x400000, ++patt))
2335 bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
2336 NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
2337
2338out:
2339 /* Sequencer on */
2340 NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) & ~0x20);
2341
2342 io_mapping_free(fb);
2343 return 0;
2344}
2345
2346static int
2347nv10_init_compute_mem(struct nvbios *bios)
2348{
2349 struct drm_device *dev = bios->dev;
2350 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
2351 const int mem_width[] = { 0x10, 0x00, 0x20 };
2352 const int mem_width_count = (dev_priv->chipset >= 0x17 ? 3 : 2);
2353 uint32_t patt = 0xdeadbeef;
2354 struct io_mapping *fb;
2355 int i, j, k;
2356
2357 /* Map the framebuffer aperture */
2358 fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
2359 pci_resource_len(dev->pdev, 1));
2360 if (!fb)
2361 return -ENOMEM;
2362
2363 bios_wr32(bios, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
2364
2365 /* Probe memory bus width */
2366 for (i = 0; i < mem_width_count; i++) {
2367 bios_md32(bios, NV04_PFB_CFG0, 0x30, mem_width[i]);
2368
2369 for (j = 0; j < 4; j++) {
2370 for (k = 0; k < 4; k++)
2371 poke_fb(dev, fb, 0x1c, 0);
2372
2373 poke_fb(dev, fb, 0x1c, patt);
2374 poke_fb(dev, fb, 0x3c, 0);
2375
2376 if (peek_fb(dev, fb, 0x1c) == patt)
2377 goto mem_width_found;
2378 }
2379 }
2380
2381mem_width_found:
2382 patt <<= 1;
2383
2384 /* Probe amount of installed memory */
2385 for (i = 0; i < 4; i++) {
2386 int off = bios_rd32(bios, NV04_PFB_FIFO_DATA) - 0x100000;
2387
2388 poke_fb(dev, fb, off, patt);
2389 poke_fb(dev, fb, 0, 0);
2390
2391 peek_fb(dev, fb, 0);
2392 peek_fb(dev, fb, 0);
2393 peek_fb(dev, fb, 0);
2394 peek_fb(dev, fb, 0);
2395
2396 if (peek_fb(dev, fb, off) == patt)
2397 goto amount_found;
2398 }
2399
2400 /* IC missing - disable the upper half of the memory space. */
2401 bios_md32(bios, NV04_PFB_CFG0, 0x1000, 0);
2402
2403amount_found:
2404 io_mapping_free(fb);
2405 return 0;
2406}
2407
2408static int
2409nv20_init_compute_mem(struct nvbios *bios)
2410{
2411 struct drm_device *dev = bios->dev;
2412 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
2413 uint32_t mask = (dev_priv->chipset >= 0x25 ? 0x300 : 0x900);
2414 uint32_t amount, off;
2415 struct io_mapping *fb;
2416
2417 /* Map the framebuffer aperture */
2418 fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
2419 pci_resource_len(dev->pdev, 1));
2420 if (!fb)
2421 return -ENOMEM;
2422
2423 bios_wr32(bios, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
2424
2425 /* Allow full addressing */
2426 bios_md32(bios, NV04_PFB_CFG0, 0, mask);
2427
2428 amount = bios_rd32(bios, NV04_PFB_FIFO_DATA);
2429 for (off = amount; off > 0x2000000; off -= 0x2000000)
2430 poke_fb(dev, fb, off - 4, off);
2431
2432 amount = bios_rd32(bios, NV04_PFB_FIFO_DATA);
2433 if (amount != peek_fb(dev, fb, amount - 4))
2434 /* IC missing - disable the upper half of the memory space. */
2435 bios_md32(bios, NV04_PFB_CFG0, mask, 0);
2436
2437 io_mapping_free(fb);
2438 return 0;
2439}
2440
2441static int
2442init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2443{
2444 /*
2445 * INIT_COMPUTE_MEM opcode: 0x63 ('c')
2446 *
2447 * offset (8 bit): opcode
2448 *
2449 * This opcode is meant to set the PFB memory config registers
2450 * appropriately so that we can correctly calculate how much VRAM it
2451 * has (on nv10 and better chipsets the amount of installed VRAM is
2452 * subsequently reported in NV_PFB_CSTATUS (0x10020C)).
2453 *
2454 * The implementation of this opcode in general consists of several
2455 * parts:
2456 *
2457 * 1) Determination of memory type and density. Only necessary for
2458 * really old chipsets, the memory type reported by the strap bits
2459 * (0x101000) is assumed to be accurate on nv05 and newer.
2460 *
2461 * 2) Determination of the memory bus width. Usually done by a cunning
2462 * combination of writes to offsets 0x1c and 0x3c in the fb, and
2463 * seeing whether the written values are read back correctly.
2464 *
2465 * Only necessary on nv0x-nv1x and nv34, on the other cards we can
2466 * trust the straps.
2467 *
2468 * 3) Determination of how many of the card's RAM pads have ICs
2469 * attached, usually done by a cunning combination of writes to an
2470 * offset slightly less than the maximum memory reported by
2471 * NV_PFB_CSTATUS, then seeing if the test pattern can be read back.
2472 *
2473 * This appears to be a NOP on IGPs and NV4x or newer chipsets; both io
2474 * logs of the VBIOS and kmmio traces of the binary driver POSTing the
2475 * card show nothing being done for this opcode. Why is it still listed
2476 * in the table?!
2477 */
2478
2479 /* no iexec->execute check by design */
2480
2481 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
2482 int ret;
2483
2484 if (dev_priv->chipset >= 0x40 ||
2485 dev_priv->chipset == 0x1a ||
2486 dev_priv->chipset == 0x1f)
2487 ret = 0;
2488 else if (dev_priv->chipset >= 0x20 &&
2489 dev_priv->chipset != 0x34)
2490 ret = nv20_init_compute_mem(bios);
2491 else if (dev_priv->chipset >= 0x10)
2492 ret = nv10_init_compute_mem(bios);
2493 else if (dev_priv->chipset >= 0x5)
2494 ret = nv05_init_compute_mem(bios);
2495 else
2496 ret = nv04_init_compute_mem(bios);
2497
2498 if (ret)
2499 return ret;
2500
2501 return 1;
2502}
2503
2504static int
2505init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2506{
2507 /*
2508 * INIT_RESET opcode: 0x65 ('e')
2509 *
2510 * offset (8 bit): opcode
2511 * offset + 1 (32 bit): register
2512 * offset + 5 (32 bit): value1
2513 * offset + 9 (32 bit): value2
2514 *
2515 * Assign "value1" to "register", then assign "value2" to "register"
2516 */
2517
2518 uint32_t reg = ROM32(bios->data[offset + 1]);
2519 uint32_t value1 = ROM32(bios->data[offset + 5]);
2520 uint32_t value2 = ROM32(bios->data[offset + 9]);
2521 uint32_t pci_nv_19, pci_nv_20;
2522
2523 /* no iexec->execute check by design */
2524
2525 pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19);
2526 bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19 & ~0xf00);
2527
2528 bios_wr32(bios, reg, value1);
2529
2530 udelay(10);
2531
2532 bios_wr32(bios, reg, value2);
2533 bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19);
2534
2535 pci_nv_20 = bios_rd32(bios, NV_PBUS_PCI_NV_20);
2536 pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED; /* 0xfffffffe */
2537 bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20);
2538
2539 return 13;
2540}
2541
2542static int
2543init_configure_mem(struct nvbios *bios, uint16_t offset,
2544 struct init_exec *iexec)
2545{
2546 /*
2547 * INIT_CONFIGURE_MEM opcode: 0x66 ('f')
2548 *
2549 * offset (8 bit): opcode
2550 *
2551 * Equivalent to INIT_DONE on bios version 3 or greater.
2552 * For early bios versions, sets up the memory registers, using values
2553 * taken from the memory init table
2554 */
2555
2556 /* no iexec->execute check by design */
2557
2558 uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
2559 uint16_t seqtbloffs = bios->legacy.sdr_seq_tbl_ptr, meminitdata = meminitoffs + 6;
2560 uint32_t reg, data;
2561
2562 if (bios->major_version > 2)
2563 return 0;
2564
2565 bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
2566 bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
2567
2568 if (bios->data[meminitoffs] & 1)
2569 seqtbloffs = bios->legacy.ddr_seq_tbl_ptr;
2570
2571 for (reg = ROM32(bios->data[seqtbloffs]);
2572 reg != 0xffffffff;
2573 reg = ROM32(bios->data[seqtbloffs += 4])) {
2574
2575 switch (reg) {
2576 case NV04_PFB_PRE:
2577 data = NV04_PFB_PRE_CMD_PRECHARGE;
2578 break;
2579 case NV04_PFB_PAD:
2580 data = NV04_PFB_PAD_CKE_NORMAL;
2581 break;
2582 case NV04_PFB_REF:
2583 data = NV04_PFB_REF_CMD_REFRESH;
2584 break;
2585 default:
2586 data = ROM32(bios->data[meminitdata]);
2587 meminitdata += 4;
2588 if (data == 0xffffffff)
2589 continue;
2590 }
2591
2592 bios_wr32(bios, reg, data);
2593 }
2594
2595 return 1;
2596}
2597
2598static int
2599init_configure_clk(struct nvbios *bios, uint16_t offset,
2600 struct init_exec *iexec)
2601{
2602 /*
2603 * INIT_CONFIGURE_CLK opcode: 0x67 ('g')
2604 *
2605 * offset (8 bit): opcode
2606 *
2607 * Equivalent to INIT_DONE on bios version 3 or greater.
2608 * For early bios versions, sets up the NVClk and MClk PLLs, using
2609 * values taken from the memory init table
2610 */
2611
2612 /* no iexec->execute check by design */
2613
2614 uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
2615 int clock;
2616
2617 if (bios->major_version > 2)
2618 return 0;
2619
2620 clock = ROM16(bios->data[meminitoffs + 4]) * 10;
2621 setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
2622
2623 clock = ROM16(bios->data[meminitoffs + 2]) * 10;
2624 if (bios->data[meminitoffs] & 1) /* DDR */
2625 clock *= 2;
2626 setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock);
2627
2628 return 1;
2629}
2630
2631static int
2632init_configure_preinit(struct nvbios *bios, uint16_t offset,
2633 struct init_exec *iexec)
2634{
2635 /*
2636 * INIT_CONFIGURE_PREINIT opcode: 0x68 ('h')
2637 *
2638 * offset (8 bit): opcode
2639 *
2640 * Equivalent to INIT_DONE on bios version 3 or greater.
2641 * For early bios versions, does early init, loading ram and crystal
2642 * configuration from straps into CR3C
2643 */
2644
2645 /* no iexec->execute check by design */
2646
2647 uint32_t straps = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
2648 uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & 0x40) >> 6;
2649
2650 if (bios->major_version > 2)
2651 return 0;
2652
2653 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
2654 NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
2655
2656 return 1;
2657}
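/*
 * Bit shuffle example for the cr3c computation above (assumed strap
 * value): straps = 0x44 gives ((0x44 << 2) & 0xf0) = 0x10 and
 * ((0x44 & 0x40) >> 6) = 0x01, so CR3C is written with 0x11.
 */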
2658
2659static int
2660init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2661{
2662 /*
2663 * INIT_IO opcode: 0x69 ('i')
2664 *
2665 * offset (8 bit): opcode
2666 * offset + 1 (16 bit): CRTC port
2667 * offset + 3 (8 bit): mask
2668 * offset + 4 (8 bit): data
2669 *
2670 * Assign ((IOVAL("crtc port") & "mask") | "data") to "crtc port"
2671 */
2672
2673 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
2674 uint16_t crtcport = ROM16(bios->data[offset + 1]);
2675 uint8_t mask = bios->data[offset + 3];
2676 uint8_t data = bios->data[offset + 4];
2677
2678 if (!iexec->execute)
2679 return 5;
2680
2681 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n",
2682 offset, crtcport, mask, data);
2683
2684 /*
2685 * I have no idea what this does, but NVIDIA do this magic sequence
2686 * in the places where this INIT_IO happens.
2687 */
2688 if (dev_priv->card_type >= NV_50 && crtcport == 0x3c3 && data == 1) {
2689 int i;
2690
2691 bios_wr32(bios, 0x614100, (bios_rd32(
2692 bios, 0x614100) & 0x0fffffff) | 0x00800000);
2693
2694 bios_wr32(bios, 0x00e18c, bios_rd32(
2695 bios, 0x00e18c) | 0x00020000);
2696
2697 bios_wr32(bios, 0x614900, (bios_rd32(
2698 bios, 0x614900) & 0x0fffffff) | 0x00800000);
2699
2700 bios_wr32(bios, 0x000200, bios_rd32(
2701 bios, 0x000200) & ~0x40000000);
2702
2703 mdelay(10);
2704
2705 bios_wr32(bios, 0x00e18c, bios_rd32(
2706 bios, 0x00e18c) & ~0x00020000);
2707
2708 bios_wr32(bios, 0x000200, bios_rd32(
2709 bios, 0x000200) | 0x40000000);
2710
2711 bios_wr32(bios, 0x614100, 0x00800018);
2712 bios_wr32(bios, 0x614900, 0x00800018);
2713
2714 mdelay(10);
2715
2716 bios_wr32(bios, 0x614100, 0x10000018);
2717 bios_wr32(bios, 0x614900, 0x10000018);
2718
2719 for (i = 0; i < 3; i++)
2720 bios_wr32(bios, 0x614280 + (i*0x800), bios_rd32(
2721 bios, 0x614280 + (i*0x800)) & 0xf0f0f0f0);
2722
2723 for (i = 0; i < 2; i++)
2724 bios_wr32(bios, 0x614300 + (i*0x800), bios_rd32(
2725 bios, 0x614300 + (i*0x800)) & 0xfffff0f0);
2726
2727 for (i = 0; i < 3; i++)
2728 bios_wr32(bios, 0x614380 + (i*0x800), bios_rd32(
2729 bios, 0x614380 + (i*0x800)) & 0xfffff0f0);
2730
2731 for (i = 0; i < 2; i++)
2732 bios_wr32(bios, 0x614200 + (i*0x800), bios_rd32(
2733 bios, 0x614200 + (i*0x800)) & 0xfffffff0);
2734
2735 for (i = 0; i < 2; i++)
2736 bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32(
2737 bios, 0x614108 + (i*0x800)) & 0x0fffffff);
2738 return 5;
2739 }
2740
2741 bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) |
2742 data);
2743 return 5;
2744}
2745
2746static int
2747init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2748{
2749 /*
2750 * INIT_SUB opcode: 0x6B ('k')
2751 *
2752 * offset (8 bit): opcode
2753 * offset + 1 (8 bit): script number
2754 *
2755 * Execute script "script number" as a subroutine
2756 */
2757
2758 uint8_t sub = bios->data[offset + 1];
2759
2760 if (!iexec->execute)
2761 return 2;
2762
2763 BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub);
2764
2765 parse_init_table(bios,
2766 ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]),
2767 iexec);
2768
2769 BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub);
2770
2771 return 2;
2772}
2773
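/* Editor's sketch: the lookup used by init_sub() above implies that the
 * init script table is simply an array of little-endian 16-bit script
 * pointers, so script N lives at init_script_tbls_ptr + N * 2.  The
 * helper name is ours, for illustration only. */
static inline uint16_t init_script_ptr(struct nvbios *bios, uint8_t n)
{
	return ROM16(bios->data[bios->init_script_tbls_ptr + n * 2]);
}
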
2774static int
2775init_ram_condition(struct nvbios *bios, uint16_t offset,
2776 struct init_exec *iexec)
2777{
2778 /*
2779 * INIT_RAM_CONDITION opcode: 0x6D ('m')
2780 *
2781 * offset (8 bit): opcode
2782 * offset + 1 (8 bit): mask
2783 * offset + 2 (8 bit): cmpval
2784 *
2785 * Test if (NV04_PFB_BOOT_0 & "mask") equals "cmpval".
2786 * If condition not met skip subsequent opcodes until condition is
2787 * inverted (INIT_NOT), or we hit INIT_RESUME
2788 */
2789
2790 uint8_t mask = bios->data[offset + 1];
2791 uint8_t cmpval = bios->data[offset + 2];
2792 uint8_t data;
2793
2794 if (!iexec->execute)
2795 return 3;
2796
2797 data = bios_rd32(bios, NV04_PFB_BOOT_0) & mask;
2798
2799 BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
2800 offset, data, cmpval);
2801
2802 if (data == cmpval)
2803 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
2804 else {
2805 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
2806 iexec->execute = false;
2807 }
2808
2809 return 3;
2810}
2811
2812static int
2813init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2814{
2815 /*
2816 * INIT_NV_REG opcode: 0x6E ('n')
2817 *
2818 * offset (8 bit): opcode
2819 * offset + 1 (32 bit): register
2820 * offset + 5 (32 bit): mask
2821 * offset + 9 (32 bit): data
2822 *
2823 * Assign ((REGVAL("register") & "mask") | "data") to "register"
2824 */
2825
2826 uint32_t reg = ROM32(bios->data[offset + 1]);
2827 uint32_t mask = ROM32(bios->data[offset + 5]);
2828 uint32_t data = ROM32(bios->data[offset + 9]);
2829
2830 if (!iexec->execute)
2831 return 13;
2832
2833 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n",
2834 offset, reg, mask, data);
2835
2836 bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data);
2837
2838 return 13;
2839}
2840
2841static int
2842init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2843{
2844 /*
2845 * INIT_MACRO opcode: 0x6F ('o')
2846 *
2847 * offset (8 bit): opcode
2848 * offset + 1 (8 bit): macro number
2849 *
2850 * Look up macro index "macro number" in the macro index table.
2851 * The macro index table entry has 1 byte for the index in the macro
2852 * table, and 1 byte for the number of times to repeat the macro.
2853 * The macro table entry has 4 bytes for the register address and
2854 * 4 bytes for the value to write to that register
2855 */
2856
2857 uint8_t macro_index_tbl_idx = bios->data[offset + 1];
2858 uint16_t tmp = bios->macro_index_tbl_ptr + (macro_index_tbl_idx * MACRO_INDEX_SIZE);
2859 uint8_t macro_tbl_idx = bios->data[tmp];
2860 uint8_t count = bios->data[tmp + 1];
2861 uint32_t reg, data;
2862 int i;
2863
2864 if (!iexec->execute)
2865 return 2;
2866
2867 BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, "
2868 "Count: 0x%02X\n",
2869 offset, macro_index_tbl_idx, macro_tbl_idx, count);
2870
2871 for (i = 0; i < count; i++) {
2872 uint16_t macroentryptr = bios->macro_tbl_ptr + (macro_tbl_idx + i) * MACRO_SIZE;
2873
2874 reg = ROM32(bios->data[macroentryptr]);
2875 data = ROM32(bios->data[macroentryptr + 4]);
2876
2877 bios_wr32(bios, reg, data);
2878 }
2879
2880 return 2;
2881}
2882
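/* Editor's worked example of the double indirection in init_macro()
 * above, assuming MACRO_INDEX_SIZE == 2 (1 byte macro table index +
 * 1 byte repeat count) and MACRO_SIZE == 8 (4 byte reg + 4 byte data),
 * which is what the arithmetic implies:
 *
 *	macro number 3 -> index entry at macro_index_tbl_ptr + 3 * 2;
 *	index entry {0x10, 0x02} -> write two {reg, data} pairs, read
 *	from macro_tbl_ptr + 0x10 * 8 and macro_tbl_ptr + 0x11 * 8.
 */
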
2883static int
2884init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2885{
2886 /*
2887 * INIT_DONE opcode: 0x71 ('q')
2888 *
2889 * offset (8 bit): opcode
2890 *
2891 * End the current script
2892 */
2893
2894 /* mild retval abuse to stop parsing this table */
2895 return 0;
2896}
2897
2898static int
2899init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2900{
2901 /*
2902 * INIT_RESUME opcode: 0x72 ('r')
2903 *
2904 * offset (8 bit): opcode
2905 *
2906 * End the current execute / no-execute condition
2907 */
2908
2909 if (iexec->execute)
2910 return 1;
2911
2912 iexec->execute = true;
2913 BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset);
2914
2915 return 1;
2916}
2917
2918static int
2919init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2920{
2921 /*
2922 * INIT_TIME opcode: 0x74 ('t')
2923 *
2924 * offset (8 bit): opcode
2925 * offset + 1 (16 bit): time
2926 *
2927 * Sleep for "time" microseconds.
2928 */
2929
2930 unsigned time = ROM16(bios->data[offset + 1]);
2931
2932 if (!iexec->execute)
2933 return 3;
2934
2935 BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n",
2936 offset, time);
2937
2938 if (time < 1000)
2939 udelay(time);
2940 else
2941 mdelay((time + 900) / 1000);
2942
2943 return 3;
2944}
2945
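/* Editor's note on the rounding above: sub-millisecond delays use
 * udelay() directly, while longer ones round up to whole milliseconds
 * once the sub-millisecond remainder reaches 100us.  For example,
 * time == 1300 gives (1300 + 900) / 1000 == 2, so mdelay(2), while
 * time == 1050 gives (1050 + 900) / 1000 == 1, so mdelay(1). */
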
2946static int
2947init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2948{
2949 /*
2950 * INIT_CONDITION opcode: 0x75 ('u')
2951 *
2952 * offset (8 bit): opcode
2953 * offset + 1 (8 bit): condition number
2954 *
2955 * Check condition "condition number" in the condition table.
2956 * If condition not met skip subsequent opcodes until condition is
2957 * inverted (INIT_NOT), or we hit INIT_RESUME
2958 */
2959
2960 uint8_t cond = bios->data[offset + 1];
2961
2962 if (!iexec->execute)
2963 return 2;
2964
2965 BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond);
2966
2967 if (bios_condition_met(bios, offset, cond))
2968 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
2969 else {
2970 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
2971 iexec->execute = false;
2972 }
2973
2974 return 2;
2975}
2976
2977static int
2978init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2979{
2980 /*
2981 * INIT_IO_CONDITION opcode: 0x76
2982 *
2983 * offset (8 bit): opcode
2984 * offset + 1 (8 bit): condition number
2985 *
2986 * Check condition "condition number" in the io condition table.
2987 * If condition not met skip subsequent opcodes until condition is
2988 * inverted (INIT_NOT), or we hit INIT_RESUME
2989 */
2990
2991 uint8_t cond = bios->data[offset + 1];
2992
2993 if (!iexec->execute)
2994 return 2;
2995
2996 BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond);
2997
2998 if (io_condition_met(bios, offset, cond))
2999 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
3000 else {
3001 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
3002 iexec->execute = false;
3003 }
3004
3005 return 2;
3006}
3007
3008static int
3009init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3010{
3011 /*
3012 * INIT_INDEX_IO opcode: 0x78 ('x')
3013 *
3014 * offset (8 bit): opcode
3015 * offset + 1 (16 bit): CRTC port
3016 * offset + 3 (8 bit): CRTC index
3017 * offset + 4 (8 bit): mask
3018 * offset + 5 (8 bit): data
3019 *
3020 * Read value at index "CRTC index" on "CRTC port", AND with "mask",
3021 * OR with "data", write-back
3022 */
3023
3024 uint16_t crtcport = ROM16(bios->data[offset + 1]);
3025 uint8_t crtcindex = bios->data[offset + 3];
3026 uint8_t mask = bios->data[offset + 4];
3027 uint8_t data = bios->data[offset + 5];
3028 uint8_t value;
3029
3030 if (!iexec->execute)
3031 return 6;
3032
3033 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
3034 "Data: 0x%02X\n",
3035 offset, crtcport, crtcindex, mask, data);
3036
3037 value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data;
3038 bios_idxprt_wr(bios, crtcport, crtcindex, value);
3039
3040 return 6;
3041}
3042
3043static int
3044init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3045{
3046 /*
3047 * INIT_PLL opcode: 0x79 ('y')
3048 *
3049 * offset (8 bit): opcode
3050 * offset + 1 (32 bit): register
3051 * offset + 5 (16 bit): freq
3052 *
3053 * Set PLL register "register" to coefficients for frequency (10kHz)
3054 * "freq"
3055 */
3056
3057 uint32_t reg = ROM32(bios->data[offset + 1]);
3058 uint16_t freq = ROM16(bios->data[offset + 5]);
3059
3060 if (!iexec->execute)
3061 return 7;
3062
3063 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq);
3064
3065 setPLL(bios, reg, freq * 10);
3066
3067 return 7;
3068}
3069
3070static int
3071init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3072{
3073 /*
3074 * INIT_ZM_REG opcode: 0x7A ('z')
3075 *
3076 * offset (8 bit): opcode
3077 * offset + 1 (32 bit): register
3078 * offset + 5 (32 bit): value
3079 *
3080 * Assign "value" to "register"
3081 */
3082
3083 uint32_t reg = ROM32(bios->data[offset + 1]);
3084 uint32_t value = ROM32(bios->data[offset + 5]);
3085
3086 if (!iexec->execute)
3087 return 9;
3088
3089 if (reg == 0x000200)
3090 value |= 1;
3091
3092 bios_wr32(bios, reg, value);
3093
3094 return 9;
3095}
3096
3097static int
3098init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
3099 struct init_exec *iexec)
3100{
3101 /*
3102 * INIT_RAM_RESTRICT_PLL opcode: 0x87 ('')
3103 *
3104 * offset (8 bit): opcode
3105 * offset + 1 (8 bit): PLL type
3106 * offset + 2 (32 bit): frequency 0
3107 *
3108 * Uses the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
3109 * ram_restrict_table_ptr. The value read from there is used to select
3110 * a frequency from the table starting at 'frequency 0' to be
3111 * programmed into the PLL corresponding to 'type'.
3112 *
3113 * The PLL limits table on cards using this opcode has a mapping of
3114 * 'type' to the relevant registers.
3115 */
3116
3117 struct drm_device *dev = bios->dev;
3118 uint32_t strap = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
3119 uint8_t index = bios->data[bios->ram_restrict_tbl_ptr + strap];
3120 uint8_t type = bios->data[offset + 1];
3121 uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]);
3122 uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry;
3123 int len = 2 + bios->ram_restrict_group_count * 4;
3124 int i;
3125
3126 if (!iexec->execute)
3127 return len;
3128
3129 if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) {
3130 NV_ERROR(dev, "PLL limits table not version 3.x\n");
3131 return len; /* deliberate, allow default clocks to remain */
3132 }
3133
3134 entry = pll_limits + pll_limits[1];
3135 for (i = 0; i < pll_limits[3]; i++, entry += pll_limits[2]) {
3136 if (entry[0] == type) {
3137 uint32_t reg = ROM32(entry[3]);
3138
3139 BIOSLOG(bios, "0x%04X: "
3140 "Type %02x Reg 0x%08x Freq %dKHz\n",
3141 offset, type, reg, freq);
3142
3143 setPLL(bios, reg, freq);
3144 return len;
3145 }
3146 }
3147
3148 NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table\n", type);
3149 return len;
3150}
3151
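/* Editor's sketch of the PLL limits 3.x layout assumed by the table
 * walk in init_ram_restrict_pll() above (field meanings inferred from
 * the code, not from documentation):
 *
 *	header: [0] version (0x3X), [1] header length,
 *	        [2] record length, [3] entry count
 *	entry:  [0] PLL type, [3] 32-bit little-endian register address
 */
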
3152static int
3153init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3154{
3155 /*
3156 * INIT_8C opcode: 0x8C ('')
3157 *
3158 * NOP so far....
3159 *
3160 */
3161
3162 return 1;
3163}
3164
3165static int
3166init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3167{
3168 /*
3169 * INIT_8D opcode: 0x8D ('')
3170 *
3171 * NOP so far....
3172 *
3173 */
3174
3175 return 1;
3176}
3177
3178static int
3179init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3180{
3181 /*
3182 * INIT_GPIO opcode: 0x8E ('')
3183 *
3184 * offset (8 bit): opcode
3185 *
3186 * Loop over all entries in the DCB GPIO table, and initialise
3187 * each GPIO according to various values listed in each entry
3188 */
3189
3190 if (iexec->execute && bios->execute)
3191 nouveau_gpio_reset(bios->dev);
3192
3193 return 1;
3194}
3195
3196static int
3197init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
3198 struct init_exec *iexec)
3199{
3200 /*
3201 * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode: 0x8F ('')
3202 *
3203 * offset (8 bit): opcode
3204 * offset + 1 (32 bit): reg
3205 * offset + 5 (8 bit): regincrement
3206 * offset + 6 (8 bit): count
3207 * offset + 7 (32 bit): value 1,1
3208 * ...
3209 *
3210 * Use the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
3211 * ram_restrict_table_ptr. The value read from here is 'n', and
3212 * "value 1,n" gets written to "reg". This repeats "count" times and on
3213 * each iteration 'm', "reg" increases by "regincrement" and
3214 * "value m,n" is used. The extent of n is limited by a number read
3215 * from the 'M' BIT table, herein called "blocklen"
3216 */
3217
3218 uint32_t reg = ROM32(bios->data[offset + 1]);
3219 uint8_t regincrement = bios->data[offset + 5];
3220 uint8_t count = bios->data[offset + 6];
3221 uint32_t strap_ramcfg, data;
3222 /* previously set by 'M' BIT table */
3223 uint16_t blocklen = bios->ram_restrict_group_count * 4;
3224 int len = 7 + count * blocklen;
3225 uint8_t index;
3226 int i;
3227
3228 /* critical: we must know the opcode's length even when skipping */
3229 if (!blocklen) {
3230 NV_ERROR(bios->dev,
3231 "0x%04X: Zero block length - has the M table "
3232 "been parsed?\n", offset);
3233 return -EINVAL;
3234 }
3235
3236 if (!iexec->execute)
3237 return len;
3238
3239 strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
3240 index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg];
3241
3242 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, RegIncrement: 0x%02X, "
3243 "Count: 0x%02X, StrapRamCfg: 0x%02X, Index: 0x%02X\n",
3244 offset, reg, regincrement, count, strap_ramcfg, index);
3245
3246 for (i = 0; i < count; i++) {
3247 data = ROM32(bios->data[offset + 7 + index * 4 + blocklen * i]);
3248
3249 bios_wr32(bios, reg, data);
3250
3251 reg += regincrement;
3252 }
3253
3254 return len;
3255}
3256
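/* Editor's worked example of the data indexing above, with hypothetical
 * values: ram_restrict_group_count == 4 (so blocklen == 16), strap
 * index == 2 and count == 3.  Iteration i then reads
 * ROM32(bios->data[offset + 7 + 8 + 16 * i]), i.e. the third dword of
 * each 16-byte row, writing it to reg, reg + regincrement and
 * reg + 2 * regincrement in turn. */
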
3257static int
3258init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3259{
3260 /*
3261 * INIT_COPY_ZM_REG opcode: 0x90 ('')
3262 *
3263 * offset (8 bit): opcode
3264 * offset + 1 (32 bit): src reg
3265 * offset + 5 (32 bit): dst reg
3266 *
3267 * Put contents of "src reg" into "dst reg"
3268 */
3269
3270 uint32_t srcreg = ROM32(bios->data[offset + 1]);
3271 uint32_t dstreg = ROM32(bios->data[offset + 5]);
3272
3273 if (!iexec->execute)
3274 return 9;
3275
3276 bios_wr32(bios, dstreg, bios_rd32(bios, srcreg));
3277
3278 return 9;
3279}
3280
3281static int
3282init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
3283 struct init_exec *iexec)
3284{
3285 /*
3286 * INIT_ZM_REG_GROUP_ADDRESS_LATCHED opcode: 0x91 ('')
3287 *
3288 * offset (8 bit): opcode
3289 * offset + 1 (32 bit): dst reg
3290 * offset + 5 (8 bit): count
3291 * offset + 6 (32 bit): data 1
3292 * ...
3293 *
3294 * For each of "count" values write "data n" to "dst reg"
3295 */
3296
3297 uint32_t reg = ROM32(bios->data[offset + 1]);
3298 uint8_t count = bios->data[offset + 5];
3299 int len = 6 + count * 4;
3300 int i;
3301
3302 if (!iexec->execute)
3303 return len;
3304
3305 for (i = 0; i < count; i++) {
3306 uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]);
3307 bios_wr32(bios, reg, data);
3308 }
3309
3310 return len;
3311}
3312
3313static int
3314init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3315{
3316 /*
3317 * INIT_RESERVED opcode: 0x92 ('')
3318 *
3319 * offset (8 bit): opcode
3320 *
3321 * Seemingly does nothing
3322 */
3323
3324 return 1;
3325}
3326
3327static int
3328init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3329{
3330 /*
3331 * INIT_96 opcode: 0x96 ('')
3332 *
3333 * offset (8 bit): opcode
3334 * offset + 1 (32 bit): sreg
3335 * offset + 5 (8 bit): sshift
3336 * offset + 6 (8 bit): smask
3337 * offset + 7 (8 bit): index
3338 * offset + 8 (32 bit): reg
3339 * offset + 12 (32 bit): mask
3340 * offset + 16 (8 bit): shift
3341 *
3342 */
3343
3344 uint16_t xlatptr = bios->init96_tbl_ptr + (bios->data[offset + 7] * 2);
3345 uint32_t reg = ROM32(bios->data[offset + 8]);
3346 uint32_t mask = ROM32(bios->data[offset + 12]);
3347 uint32_t val;
3348
3349 val = bios_rd32(bios, ROM32(bios->data[offset + 1]));
3350 if (bios->data[offset + 5] < 0x80)
3351 val >>= bios->data[offset + 5];
3352 else
3353 val <<= (0x100 - bios->data[offset + 5]);
3354 val &= bios->data[offset + 6];
3355
3356 val = bios->data[ROM16(bios->data[xlatptr]) + val];
3357 val <<= bios->data[offset + 16];
3358
3359 if (!iexec->execute)
3360 return 17;
3361
3362 bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val);
3363 return 17;
3364}
3365
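/* Editor's sketch: the shift byte consumed by init_96() above is
 * effectively signed -- values below 0x80 shift right, values of 0x80
 * and up shift left by (0x100 - sshift), so 0xf8 means "left by 8".
 * The helper below restates just that decode (the name is ours): */
static inline uint32_t init96_shift(uint32_t val, uint8_t sshift)
{
	return sshift < 0x80 ? val >> sshift : val << (0x100 - sshift);
}
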
3366static int
3367init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3368{
3369 /*
3370 * INIT_97 opcode: 0x97 ('')
3371 *
3372 * offset (8 bit): opcode
3373 * offset + 1 (32 bit): register
3374 * offset + 5 (32 bit): mask
3375 * offset + 9 (32 bit): value
3376 *
3377 * Adds "value" to "register" preserving the fields specified
3378 * by "mask"
3379 */
3380
3381 uint32_t reg = ROM32(bios->data[offset + 1]);
3382 uint32_t mask = ROM32(bios->data[offset + 5]);
3383 uint32_t add = ROM32(bios->data[offset + 9]);
3384 uint32_t val;
3385
3386 val = bios_rd32(bios, reg);
3387 val = (val & mask) | ((val + add) & ~mask);
3388
3389 if (!iexec->execute)
3390 return 13;
3391
3392 bios_wr32(bios, reg, val);
3393 return 13;
3394}
3395
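/* Editor's worked example of init_97()'s masked add: with a register
 * value of 0x12340fff, mask 0xffff0000 and value 4, only the bits
 * outside the mask take the sum:
 *
 *	(0x12340fff & 0xffff0000) | ((0x12340fff + 4) & 0x0000ffff)
 *	= 0x12340000 | 0x00001003 = 0x12341003
 */
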
3396static int
3397init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3398{
3399 /*
3400 * INIT_AUXCH opcode: 0x98 ('')
3401 *
3402 * offset (8 bit): opcode
3403 * offset + 1 (32 bit): address
3404 * offset + 5 (8 bit): count
3405 * offset + 6 (8 bit): mask 0
3406 * offset + 7 (8 bit): data 0
3407 * ...
3408 *
3409 */
3410
3411 struct drm_device *dev = bios->dev;
3412 struct nouveau_i2c_chan *auxch;
3413 uint32_t addr = ROM32(bios->data[offset + 1]);
3414 uint8_t count = bios->data[offset + 5];
3415 int len = 6 + count * 2;
3416 int ret, i;
3417
3418 if (!bios->display.output) {
3419 NV_ERROR(dev, "INIT_AUXCH: no active output\n");
3420 return len;
3421 }
3422
3423 auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
3424 if (!auxch) {
3425 NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
3426 bios->display.output->i2c_index);
3427 return len;
3428 }
3429
3430 if (!iexec->execute)
3431 return len;
3432
3433 offset += 6;
3434 for (i = 0; i < count; i++, offset += 2) {
3435 uint8_t data;
3436
3437 ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
3438 if (ret) {
3439 NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
3440 return len;
3441 }
3442
3443 data &= bios->data[offset + 0];
3444 data |= bios->data[offset + 1];
3445
3446 ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
3447 if (ret) {
3448 NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
3449 return len;
3450 }
3451 }
3452
3453 return len;
3454}
3455
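/* Editor's note: in the nouveau_dp_auxch() calls above, the second
 * argument selects the AUX request; 9 and 8 correspond to the
 * DisplayPort native-read and native-write request codes, which is
 * consistent with the read-modify-write this opcode performs. */
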
3456static int
3457init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3458{
3459 /*
3460 * INIT_ZM_AUXCH opcode: 0x99 ('')
3461 *
3462 * offset (8 bit): opcode
3463 * offset + 1 (32 bit): address
3464 * offset + 5 (8 bit): count
3465 * offset + 6 (8 bit): data 0
3466 * ...
3467 *
3468 */
3469
3470 struct drm_device *dev = bios->dev;
3471 struct nouveau_i2c_chan *auxch;
3472 uint32_t addr = ROM32(bios->data[offset + 1]);
3473 uint8_t count = bios->data[offset + 5];
3474 int len = 6 + count;
3475 int ret, i;
3476
3477 if (!bios->display.output) {
3478 NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
3479 return len;
3480 }
3481
3482 auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
3483 if (!auxch) {
3484 NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
3485 bios->display.output->i2c_index);
3486 return len;
3487 }
3488
3489 if (!iexec->execute)
3490 return len;
3491
3492 offset += 6;
3493 for (i = 0; i < count; i++, offset++) {
3494 ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
3495 if (ret) {
3496 NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
3497 return len;
3498 }
3499 }
3500
3501 return len;
3502}
3503
3504static int
3505init_i2c_long_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3506{
3507 /*
3508 * INIT_I2C_LONG_IF opcode: 0x9A ('')
3509 *
3510 * offset (8 bit): opcode
3511 * offset + 1 (8 bit): DCB I2C table entry index
3512 * offset + 2 (8 bit): I2C slave address
3513 * offset + 3 (16 bit): I2C register
3514 * offset + 5 (8 bit): mask
3515 * offset + 6 (8 bit): data
3516 *
3517 * Read the register given by "I2C register" on the device addressed
3518 * by "I2C slave address" on the I2C bus given by "DCB I2C table
3519 * entry index". Compare the result AND "mask" to "data".
3520 * If they're not equal, skip subsequent opcodes until condition is
3521 * inverted (INIT_NOT), or we hit INIT_RESUME
3522 */
3523
3524 uint8_t i2c_index = bios->data[offset + 1];
3525 uint8_t i2c_address = bios->data[offset + 2] >> 1;
3526 uint8_t reglo = bios->data[offset + 3];
3527 uint8_t reghi = bios->data[offset + 4];
3528 uint8_t mask = bios->data[offset + 5];
3529 uint8_t data = bios->data[offset + 6];
3530 struct nouveau_i2c_chan *chan;
3531 uint8_t buf0[2] = { reghi, reglo };
3532 uint8_t buf1[1];
3533 struct i2c_msg msg[2] = {
3534 { i2c_address, 0, 2, buf0 }, /* send both register address bytes */
3535 { i2c_address, I2C_M_RD, 1, buf1 },
3536 };
3537 int ret;
3538
3539 /* no execute check by design */
3540
3541 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X\n",
3542 offset, i2c_index, i2c_address);
3543
3544 chan = init_i2c_device_find(bios->dev, i2c_index);
3545 if (!chan)
3546 return -ENODEV;
3547
3548
3549 ret = i2c_transfer(&chan->adapter, msg, 2);
3550 if (ret < 0) {
3551 BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X:0x%02X, Value: [no device], "
3552 "Mask: 0x%02X, Data: 0x%02X\n",
3553 offset, reghi, reglo, mask, data);
3554 iexec->execute = 0;
3555 return 7;
3556 }
3557
3558 BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X:0x%02X, Value: 0x%02X, "
3559 "Mask: 0x%02X, Data: 0x%02X\n",
3560 offset, reghi, reglo, buf1[0], mask, data);
3561
3562 iexec->execute = ((buf1[0] & mask) == data);
3563
3564 return 7;
3565}
3566
3567static struct init_tbl_entry itbl_entry[] = {
3568 /* command name , id , command handler */
3569 /* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
3570 { "INIT_IO_RESTRICT_PROG" , 0x32, init_io_restrict_prog },
3571 { "INIT_REPEAT" , 0x33, init_repeat },
3572 { "INIT_IO_RESTRICT_PLL" , 0x34, init_io_restrict_pll },
3573 { "INIT_END_REPEAT" , 0x36, init_end_repeat },
3574 { "INIT_COPY" , 0x37, init_copy },
3575 { "INIT_NOT" , 0x38, init_not },
3576 { "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition },
3577 { "INIT_DP_CONDITION" , 0x3A, init_dp_condition },
3578 { "INIT_OP_3B" , 0x3B, init_op_3b },
3579 { "INIT_OP_3C" , 0x3C, init_op_3c },
3580 { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched },
3581 { "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 },
3582 { "INIT_PLL2" , 0x4B, init_pll2 },
3583 { "INIT_I2C_BYTE" , 0x4C, init_i2c_byte },
3584 { "INIT_ZM_I2C_BYTE" , 0x4D, init_zm_i2c_byte },
3585 { "INIT_ZM_I2C" , 0x4E, init_zm_i2c },
3586 { "INIT_TMDS" , 0x4F, init_tmds },
3587 { "INIT_ZM_TMDS_GROUP" , 0x50, init_zm_tmds_group },
3588 { "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, init_cr_idx_adr_latch },
3589 { "INIT_CR" , 0x52, init_cr },
3590 { "INIT_ZM_CR" , 0x53, init_zm_cr },
3591 { "INIT_ZM_CR_GROUP" , 0x54, init_zm_cr_group },
3592 { "INIT_CONDITION_TIME" , 0x56, init_condition_time },
3593 { "INIT_LTIME" , 0x57, init_ltime },
3594 { "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence },
3595 /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
3596 { "INIT_SUB_DIRECT" , 0x5B, init_sub_direct },
3597 { "INIT_JUMP" , 0x5C, init_jump },
3598 { "INIT_I2C_IF" , 0x5E, init_i2c_if },
3599 { "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg },
3600 { "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io },
3601 { "INIT_COMPUTE_MEM" , 0x63, init_compute_mem },
3602 { "INIT_RESET" , 0x65, init_reset },
3603 { "INIT_CONFIGURE_MEM" , 0x66, init_configure_mem },
3604 { "INIT_CONFIGURE_CLK" , 0x67, init_configure_clk },
3605 { "INIT_CONFIGURE_PREINIT" , 0x68, init_configure_preinit },
3606 { "INIT_IO" , 0x69, init_io },
3607 { "INIT_SUB" , 0x6B, init_sub },
3608 { "INIT_RAM_CONDITION" , 0x6D, init_ram_condition },
3609 { "INIT_NV_REG" , 0x6E, init_nv_reg },
3610 { "INIT_MACRO" , 0x6F, init_macro },
3611 { "INIT_DONE" , 0x71, init_done },
3612 { "INIT_RESUME" , 0x72, init_resume },
3613 /* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */
3614 { "INIT_TIME" , 0x74, init_time },
3615 { "INIT_CONDITION" , 0x75, init_condition },
3616 { "INIT_IO_CONDITION" , 0x76, init_io_condition },
3617 { "INIT_INDEX_IO" , 0x78, init_index_io },
3618 { "INIT_PLL" , 0x79, init_pll },
3619 { "INIT_ZM_REG" , 0x7A, init_zm_reg },
3620 { "INIT_RAM_RESTRICT_PLL" , 0x87, init_ram_restrict_pll },
3621 { "INIT_8C" , 0x8C, init_8c },
3622 { "INIT_8D" , 0x8D, init_8d },
3623 { "INIT_GPIO" , 0x8E, init_gpio },
3624 { "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, init_ram_restrict_zm_reg_group },
3625 { "INIT_COPY_ZM_REG" , 0x90, init_copy_zm_reg },
3626 { "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, init_zm_reg_group_addr_latched },
3627 { "INIT_RESERVED" , 0x92, init_reserved },
3628 { "INIT_96" , 0x96, init_96 },
3629 { "INIT_97" , 0x97, init_97 },
3630 { "INIT_AUXCH" , 0x98, init_auxch },
3631 { "INIT_ZM_AUXCH" , 0x99, init_zm_auxch },
3632 { "INIT_I2C_LONG_IF" , 0x9A, init_i2c_long_if },
3633 { NULL , 0 , NULL }
3634};
3635
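/* Editor's sketch of the entry type the table above implies (the real
 * definition lives earlier in this file and may differ in detail): */
struct init_tbl_entry_sketch {
	char *name;
	uint8_t id;
	/* handler returns bytes consumed, 0 to end the script, -errno on error */
	int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
};
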
3636#define MAX_TABLE_OPS 1000
3637
3638static int
3639parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
3640{
3641 /*
3642 * Parses all commands in an init table.
3643 *
3644 * We start out executing all commands found in the init table. Some
3645 * opcodes may set iexec->execute to false, which causes the
3646 * following opcodes to perform no operation until another opcode
3647 * sets it back to true.
3648 */
3649
3650 int count = 0, i, ret;
3651 uint8_t id;
3652
3653 /* catch NULL script pointers */
3654 if (offset == 0)
3655 return 0;
3656
3657 /*
3658 * Loop until INIT_DONE causes us to break out of the loop
3659 * (or until offset > bios length just in case... )
3660 * (and no more than MAX_TABLE_OPS iterations, just in case... )
3661 */
3662 while ((offset < bios->length) && (count++ < MAX_TABLE_OPS)) {
3663 id = bios->data[offset];
3664
3665 /* Find matching id in itbl_entry */
3666 for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
3667 ;
3668
3669 if (!itbl_entry[i].name) {
3670 NV_ERROR(bios->dev,
3671 "0x%04X: Init table command not found: "
3672 "0x%02X\n", offset, id);
3673 return -ENOENT;
3674 }
3675
3676 BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n", offset,
3677 itbl_entry[i].id, itbl_entry[i].name);
3678
3679 /* execute the matching command handler */
3680 ret = (*itbl_entry[i].handler)(bios, offset, iexec);
3681 if (ret < 0) {
3682 NV_ERROR(bios->dev, "0x%04X: Failed parsing init "
3683 "table opcode: %s %d\n", offset,
3684 itbl_entry[i].name, ret);
3685 }
3686
3687 if (ret <= 0)
3688 break;
3689
3690 /*
3691 * Advance the offset past the current command, including
3692 * all of its data, so that the offset then points at the
3693 * next opcode.
3694 */
3695 offset += ret;
3696 }
3697
3698 if (offset >= bios->length)
3699 NV_WARN(bios->dev,
3700 "Offset 0x%04X greater than known bios image length. "
3701 "Corrupt image?\n", offset);
3702 if (count >= MAX_TABLE_OPS)
3703 NV_WARN(bios->dev,
3704 "More than %d opcodes to a table is unlikely, "
3705 "is the bios image corrupt?\n", MAX_TABLE_OPS);
3706
3707 return 0;
3708}
3709
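/* Editor's sketch of a minimal handler honouring the contract that
 * parse_init_table() above relies on: a positive return is the number
 * of bytes the opcode occupies (so the parser can advance), 0 ends the
 * script, and a negative value reports an error.  The opcode itself is
 * hypothetical, for illustration only. */
static int
init_example_nop(struct nvbios *bios, uint16_t offset,
		 struct init_exec *iexec)
{
	/* nothing to do, but the opcode byte must still be consumed so
	 * that parsing can continue past it */
	return 1;
}
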
3710static void
3711parse_init_tables(struct nvbios *bios)
3712{
3713 /* Loops and calls parse_init_table() for each present table. */
3714
3715 int i = 0;
3716 uint16_t table;
3717 struct init_exec iexec = {true, false};
3718
3719 if (bios->old_style_init) {
3720 if (bios->init_script_tbls_ptr)
3721 parse_init_table(bios, bios->init_script_tbls_ptr, &iexec);
3722 if (bios->extra_init_script_tbl_ptr)
3723 parse_init_table(bios, bios->extra_init_script_tbl_ptr, &iexec);
3724
3725 return;
3726 }
3727
3728 while ((table = ROM16(bios->data[bios->init_script_tbls_ptr + i]))) {
3729 NV_INFO(bios->dev,
3730 "Parsing VBIOS init table %d at offset 0x%04X\n",
3731 i / 2, table);
3732 BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", table);
3733
3734 parse_init_table(bios, table, &iexec);
3735 i += 2;
3736 }
3737}
3738
 static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
 {
 	int compare_record_len, i = 0;
@@ -3764,28 +95,24 @@ static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
 
 static void
 run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
-		      struct dcb_entry *dcbent, int head, bool dl)
+		      struct dcb_output *dcbent, int head, bool dl)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	struct init_exec iexec = {true, false};
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
+	NV_INFO(drm, "0x%04X: Parsing digital output script table\n",
 		scriptptr);
-	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_44,
-		       head ? NV_CIO_CRE_44_HEADB : NV_CIO_CRE_44_HEADA);
-	/* note: if dcb entries have been merged, index may be misleading */
-	NVWriteVgaCrtc5758(dev, head, 0, dcbent->index);
-	parse_init_table(bios, scriptptr, &iexec);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, head ? NV_CIO_CRE_44_HEADB :
+		       NV_CIO_CRE_44_HEADA);
+	nouveau_bios_run_init_table(dev, scriptptr, dcbent, head);
 
 	nv04_dfp_bind_head(dev, dcbent, head, dl);
 }
 
-static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
+static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
+	uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & DCB_OUTPUT_C ? 1 : 0);
 	uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
 
 	if (!bios->fp.xlated_entry || !sub || !scriptofs)
@@ -3808,7 +135,7 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
 	return 0;
 }
 
-static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
+static int run_lvds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk)
 {
 	/*
 	 * The BIT LVDS table's header has the information to setup the
@@ -3820,8 +147,8 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
 	 * conf byte. These tables are similar to the TMDS tables, consisting
 	 * of a list of pxclks and script pointers.
 	 */
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
 	uint16_t scriptptr = 0, clktable;
 
@@ -3866,14 +193,14 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
 
 		clktable = ROM16(bios->data[clktable]);
 		if (!clktable) {
-			NV_ERROR(dev, "Pixel clock comparison table not found\n");
+			NV_ERROR(drm, "Pixel clock comparison table not found\n");
 			return -ENOENT;
 		}
 		scriptptr = clkcmptable(bios, clktable, pxclk);
 	}
 
 	if (!scriptptr) {
-		NV_ERROR(dev, "LVDS output init script not found\n");
+		NV_ERROR(drm, "LVDS output init script not found\n");
 		return -ENOENT;
 	}
 	run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link);
@@ -3881,7 +208,7 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
 	return 0;
 }
 
-int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
+int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head, enum LVDS_script script, int pxclk)
 {
 	/*
 	 * LVDS operations are multiplexed in an effort to present a single API
@@ -3889,8 +216,9 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
 	 * This acts as the demux
 	 */
 
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nvbios *bios = &drm->vbios;
 	uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
 	uint32_t sel_clk_binding, sel_clk;
 	int ret;
@@ -3909,10 +237,10 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
 	if (script == LVDS_RESET && bios->fp.power_off_for_reset)
 		call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk);
 
-	NV_TRACE(dev, "Calling LVDS script %d:\n", script);
+	NV_INFO(drm, "Calling LVDS script %d:\n", script);
 
 	/* don't let script change pll->head binding */
-	sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
+	sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
 
 	if (lvds_ver < 0x30)
 		ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
@@ -3924,7 +252,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
 	sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
 	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
 	/* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
-	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);
+	nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
 
 	return ret;
 }
@@ -3942,12 +270,13 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
 	 * the maximum number of records that can be held in the table.
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t lvds_ver, headerlen, recordlen;
 
 	memset(lth, 0, sizeof(struct lvdstableheader));
 
 	if (bios->fp.lvdsmanufacturerpointer == 0x0) {
-		NV_ERROR(dev, "Pointer to LVDS manufacturer table invalid\n");
+		NV_ERROR(drm, "Pointer to LVDS manufacturer table invalid\n");
 		return -EINVAL;
 	}
 
@@ -3961,7 +290,7 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
 	case 0x30:	/* NV4x */
 		headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
 		if (headerlen < 0x1f) {
-			NV_ERROR(dev, "LVDS table header not understood\n");
+			NV_ERROR(drm, "LVDS table header not understood\n");
 			return -EINVAL;
 		}
 		recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
@@ -3969,13 +298,13 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
 	case 0x40:	/* G80/G90 */
 		headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
 		if (headerlen < 0x7) {
-			NV_ERROR(dev, "LVDS table header not understood\n");
+			NV_ERROR(drm, "LVDS table header not understood\n");
 			return -EINVAL;
 		}
 		recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
 		break;
 	default:
-		NV_ERROR(dev,
+		NV_ERROR(drm,
 			 "LVDS table revision %d.%d not currently supported\n",
 			 lvds_ver >> 4, lvds_ver & 0xf);
 		return -ENOSYS;
@@ -3991,7 +320,7 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
 static int
 get_fp_strap(struct drm_device *dev, struct nvbios *bios)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
 
 	/*
 	 * The fp strap is normally dictated by the "User Strap" in
@@ -4005,14 +334,15 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
 	if (bios->major_version < 5 && bios->data[0x48] & 0x4)
 		return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
 
-	if (dev_priv->card_type >= NV_50)
-		return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
+	if (device->card_type >= NV_50)
+		return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
 	else
-		return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
+		return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
 }
 
 static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t *fptable;
 	uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex;
 	int ret, ofs, fpstrapping;
@@ -4022,7 +352,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 		/* Apple cards don't have the fp table; the laptops use DDC */
 		/* The table is also missing on some x86 IGPs */
 #ifndef __powerpc__
-		NV_ERROR(dev, "Pointer to flat panel table invalid\n");
+		NV_ERROR(drm, "Pointer to flat panel table invalid\n");
 #endif
 		bios->digital_min_front_porch = 0x4b;
 		return 0;
@@ -4061,7 +391,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 		ofs = -7;
 		break;
 	default:
-		NV_ERROR(dev,
+		NV_ERROR(drm,
 			 "FP table revision %d.%d not currently supported\n",
 			 fptable_ver >> 4, fptable_ver & 0xf);
 		return -ENOSYS;
@@ -4080,7 +410,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 		bios->fp.xlatwidth = lth.recordlen;
 	}
 	if (bios->fp.fpxlatetableptr == 0x0) {
-		NV_ERROR(dev, "Pointer to flat panel xlat table invalid\n");
+		NV_ERROR(drm, "Pointer to flat panel xlat table invalid\n");
 		return -EINVAL;
 	}
 
@@ -4090,7 +420,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 			 fpstrapping * bios->fp.xlatwidth];
 
 	if (fpindex > fpentries) {
-		NV_ERROR(dev, "Bad flat panel table index\n");
+		NV_ERROR(drm, "Bad flat panel table index\n");
 		return -ENOENT;
 	}
 
@@ -4109,7 +439,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 	bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen +
 			    recordlen * fpindex + ofs;
 
-	NV_TRACE(dev, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n",
+	NV_INFO(drm, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n",
 		 ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1,
 		 ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1,
 		 ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10);
@@ -4119,8 +449,8 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
 
 bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
 
 	if (!mode)	/* just checking whether we can produce a mode */
@@ -4190,8 +520,8 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
 	 * requiring tests against the native-mode pixel clock, cannot be done
 	 * until later, when this function should be called with non-zero pxclk
 	 */
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
 	struct lvdstableheader lth;
 	uint16_t lvdsofs;
@@ -4252,7 +582,7 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
 		lvdsmanufacturerindex = fpstrapping;
 		break;
 	default:
-		NV_ERROR(dev, "LVDS table revision not currently supported\n");
+		NV_ERROR(drm, "LVDS table revision not currently supported\n");
 		return -ENOSYS;
 	}
 
@@ -4300,7 +630,7 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
  * This function returns true if a particular DCB entry matches.
  */
 bool
-bios_encoder_match(struct dcb_entry *dcb, u32 hash)
+bios_encoder_match(struct dcb_output *dcb, u32 hash)
 {
 	if ((hash & 0x000000f0) != (dcb->location << 4))
 		return false;
@@ -4310,9 +640,9 @@ bios_encoder_match(struct dcb_entry *dcb, u32 hash)
 		return false;
 
 	switch (dcb->type) {
-	case OUTPUT_TMDS:
-	case OUTPUT_LVDS:
-	case OUTPUT_DP:
+	case DCB_OUTPUT_TMDS:
+	case DCB_OUTPUT_LVDS:
+	case DCB_OUTPUT_DP:
 		if (hash & 0x00c00000) {
 			if (!(hash & (dcb->sorconf.link << 22)))
 				return false;
@@ -4324,7 +654,7 @@ bios_encoder_match(struct dcb_entry *dcb, u32 hash)
 
 int
 nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
-			       struct dcb_entry *dcbent, int crtc)
+			       struct dcb_output *dcbent, int crtc)
 {
 	/*
 	 * The display script table is located by the BIT 'U' table.
@@ -4349,15 +679,15 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 	 * offset + 5 (16 bits): pointer to first output script table
 	 */
 
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	uint8_t *table = &bios->data[bios->display.script_table_ptr];
 	uint8_t *otable = NULL;
 	uint16_t script;
 	int i;
 
 	if (!bios->display.script_table_ptr) {
-		NV_ERROR(dev, "No pointer to output script table\n");
+		NV_ERROR(drm, "No pointer to output script table\n");
 		return 1;
 	}
 
@@ -4369,7 +699,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 		return 1;
 
 	if (table[0] != 0x20 && table[0] != 0x21) {
-		NV_ERROR(dev, "Output script table version 0x%02x unknown\n",
+		NV_ERROR(drm, "Output script table version 0x%02x unknown\n",
 			 table[0]);
 		return 1;
 	}
@@ -4404,7 +734,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 	 * script tables is a pointer to the script to execute.
 	 */
 
-	NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
+	NV_DEBUG(drm, "Searching for output entry for %d %d %d\n",
 			dcbent->type, dcbent->location, dcbent->or);
 	for (i = 0; i < table[3]; i++) {
 		otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
@@ -4413,7 +743,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 	}
 
 	if (!otable) {
-		NV_DEBUG_KMS(dev, "failed to match any output table\n");
+		NV_DEBUG(drm, "failed to match any output table\n");
 		return 1;
 	}
 
@@ -4425,7 +755,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 	}
 
 	if (i == otable[5]) {
-		NV_ERROR(dev, "Table 0x%04x not found for %d/%d, "
+		NV_ERROR(drm, "Table 0x%04x not found for %d/%d, "
			 "using first\n",
 			 type, dcbent->type, dcbent->or);
 		i = 0;
@@ -4435,21 +765,21 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 	if (pclk == 0) {
 		script = ROM16(otable[6]);
 		if (!script) {
-			NV_DEBUG_KMS(dev, "output script 0 not found\n");
+			NV_DEBUG(drm, "output script 0 not found\n");
 			return 1;
 		}
 
-		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
+		NV_DEBUG(drm, "0x%04X: parsing output script 0\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	} else
 	if (pclk == -1) {
 		script = ROM16(otable[8]);
 		if (!script) {
-			NV_DEBUG_KMS(dev, "output script 1 not found\n");
+			NV_DEBUG(drm, "output script 1 not found\n");
 			return 1;
 		}
 
-		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
+		NV_DEBUG(drm, "0x%04X: parsing output script 1\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	} else
 	if (pclk == -2) {
@@ -4458,11 +788,11 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 		else
 			script = 0;
 		if (!script) {
-			NV_DEBUG_KMS(dev, "output script 2 not found\n");
+			NV_DEBUG(drm, "output script 2 not found\n");
 			return 1;
 		}
 
-		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
+		NV_DEBUG(drm, "0x%04X: parsing output script 2\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	} else
 	if (pclk > 0) {
@@ -4470,11 +800,11 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 		if (script)
 			script = clkcmptable(bios, script, pclk);
 		if (!script) {
-			NV_DEBUG_KMS(dev, "clock script 0 not found\n");
+			NV_DEBUG(drm, "clock script 0 not found\n");
 			return 1;
 		}
 
-		NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
+		NV_DEBUG(drm, "0x%04X: parsing clock script 0\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	} else
 	if (pclk < 0) {
@@ -4482,11 +812,11 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 		if (script)
 			script = clkcmptable(bios, script, -pclk);
 		if (!script) {
-			NV_DEBUG_KMS(dev, "clock script 1 not found\n");
+			NV_DEBUG(drm, "clock script 1 not found\n");
 			return 1;
 		}
 
-		NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
+		NV_DEBUG(drm, "0x%04X: parsing clock script 1\n", script);
 		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	}
 
@@ -4494,7 +824,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
 }
 
 
-int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, int pxclk)
+int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
 {
 	/*
 	 * the pxclk parameter is in kHz
@@ -4505,8 +835,9 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
 	 * ffs(or) == 3, use the second.
 	 */
 
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nvbios *bios = &drm->vbios;
 	int cv = bios->chip_version;
 	uint16_t clktable = 0, scriptptr;
 	uint32_t sel_clk_binding, sel_clk;
@@ -4527,19 +858,19 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
 	}
 
 	if (!clktable) {
-		NV_ERROR(dev, "Pixel clock comparison table not found\n");
+		NV_ERROR(drm, "Pixel clock comparison table not found\n");
 		return -EINVAL;
 	}
 
 	scriptptr = clkcmptable(bios, clktable, pxclk);
 
 	if (!scriptptr) {
-		NV_ERROR(dev, "TMDS output init script not found\n");
+		NV_ERROR(drm, "TMDS output init script not found\n");
 		return -ENOENT;
 	}
 
 	/* don't let script change pll->head binding */
-	sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
+	sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
 	run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
 	sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
 	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
@@ -4547,447 +878,6 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
 	return 0;
 }
 
-struct pll_mapping {
-	u8  type;
-	u32 reg;
-};
-
-static struct pll_mapping nv04_pll_mapping[] = {
-	{ PLL_CORE  , NV_PRAMDAC_NVPLL_COEFF },
-	{ PLL_MEMORY, NV_PRAMDAC_MPLL_COEFF },
-	{ PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
-	{ PLL_VPLL1 , NV_RAMDAC_VPLL2 },
-	{}
-};
-
-static struct pll_mapping nv40_pll_mapping[] = {
-	{ PLL_CORE  , 0x004000 },
-	{ PLL_MEMORY, 0x004020 },
-	{ PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
-	{ PLL_VPLL1 , NV_RAMDAC_VPLL2 },
-	{}
-};
-
-static struct pll_mapping nv50_pll_mapping[] = {
-	{ PLL_CORE  , 0x004028 },
-	{ PLL_SHADER, 0x004020 },
-	{ PLL_UNK03 , 0x004000 },
-	{ PLL_MEMORY, 0x004008 },
-	{ PLL_UNK40 , 0x00e810 },
-	{ PLL_UNK41 , 0x00e818 },
-	{ PLL_UNK42 , 0x00e824 },
-	{ PLL_VPLL0 , 0x614100 },
-	{ PLL_VPLL1 , 0x614900 },
-	{}
-};
-
-static struct pll_mapping nv84_pll_mapping[] = {
-	{ PLL_CORE  , 0x004028 },
-	{ PLL_SHADER, 0x004020 },
-	{ PLL_MEMORY, 0x004008 },
-	{ PLL_VDEC  , 0x004030 },
-	{ PLL_UNK41 , 0x00e818 },
-	{ PLL_VPLL0 , 0x614100 },
-	{ PLL_VPLL1 , 0x614900 },
-	{}
-};
-
-u32
-get_pll_register(struct drm_device *dev, enum pll_types type)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	struct pll_mapping *map;
-	int i;
-
-	if (dev_priv->card_type < NV_40)
-		map = nv04_pll_mapping;
-	else
-	if (dev_priv->card_type < NV_50)
-		map = nv40_pll_mapping;
-	else {
-		u8 *plim = &bios->data[bios->pll_limit_tbl_ptr];
-
-		if (plim[0] >= 0x30) {
-			u8 *entry = plim + plim[1];
-			for (i = 0; i < plim[3]; i++, entry += plim[2]) {
-				if (entry[0] == type)
-					return ROM32(entry[3]);
-			}
-
-			return 0;
-		}
-
-		if (dev_priv->chipset == 0x50)
-			map = nv50_pll_mapping;
-		else
-			map = nv84_pll_mapping;
-	}
-
-	while (map->reg) {
-		if (map->type == type)
-			return map->reg;
-		map++;
-	}
-
-	return 0;
-}
-
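The lookup just removed (it moves into the new subdev BIOS code) is a sentinel-terminated mapping table: the empty `{}` initializer leaves `reg` zero, which ends the scan. The same shape, isolated for clarity:

```c
struct pll_map { u8 type; u32 reg; };

/* First matching type wins; a zero 'reg' (the {} sentinel) ends the
 * table.  Returns 0 when the type has no register on this chipset. */
static u32 lookup_pll_reg(const struct pll_map *map, u8 type)
{
	for (; map->reg; map++) {
		if (map->type == type)
			return map->reg;
	}
	return 0;
}
```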
-int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
-{
-	/*
-	 * PLL limits table
-	 *
-	 * Version 0x10: NV30, NV31
-	 * One byte header (version), one record of 24 bytes
-	 * Version 0x11: NV36 - Not implemented
-	 * Seems to have same record style as 0x10, but 3 records rather than 1
-	 * Version 0x20: Found on Geforce 6 cards
-	 * Trivial 4 byte BIT header. 31 (0x1f) byte record length
-	 * Version 0x21: Found on Geforce 7, 8 and some Geforce 6 cards
-	 * 5 byte header, fifth byte of unknown purpose. 35 (0x23) byte record
-	 * length in general, some (integrated) have an extra configuration byte
-	 * Version 0x30: Found on Geforce 8, separates the register mapping
-	 * from the limits tables.
-	 */
-
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	int cv = bios->chip_version, pllindex = 0;
-	uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
-	uint32_t crystal_strap_mask, crystal_straps;
-
-	if (!bios->pll_limit_tbl_ptr) {
-		if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
-		    cv >= 0x40) {
-			NV_ERROR(dev, "Pointer to PLL limits table invalid\n");
-			return -EINVAL;
-		}
-	} else
-		pll_lim_ver = bios->data[bios->pll_limit_tbl_ptr];
-
-	crystal_strap_mask = 1 << 6;
-	/* open coded dev->twoHeads test */
-	if (cv > 0x10 && cv != 0x15 && cv != 0x1a && cv != 0x20)
-		crystal_strap_mask |= 1 << 22;
-	crystal_straps = nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) &
-			 crystal_strap_mask;
-
-	switch (pll_lim_ver) {
-	/*
-	 * We use version 0 to indicate a pre limit table bios (single stage
-	 * pll) and load the hard coded limits instead.
-	 */
-	case 0:
-		break;
-	case 0x10:
-	case 0x11:
-		/*
-		 * Strictly v0x11 has 3 entries, but the last two don't seem
-		 * to get used.
-		 */
-		headerlen = 1;
-		recordlen = 0x18;
-		entries = 1;
-		pllindex = 0;
-		break;
-	case 0x20:
-	case 0x21:
-	case 0x30:
-	case 0x40:
-		headerlen = bios->data[bios->pll_limit_tbl_ptr + 1];
-		recordlen = bios->data[bios->pll_limit_tbl_ptr + 2];
-		entries = bios->data[bios->pll_limit_tbl_ptr + 3];
-		break;
-	default:
-		NV_ERROR(dev, "PLL limits table revision 0x%X not currently "
-				"supported\n", pll_lim_ver);
-		return -ENOSYS;
-	}
-
-	/* initialize all members to zero */
-	memset(pll_lim, 0, sizeof(struct pll_lims));
-
-	/* if we were passed a type rather than a register, figure
-	 * out the register and store it
-	 */
-	if (limit_match > PLL_MAX)
-		pll_lim->reg = limit_match;
-	else {
-		pll_lim->reg = get_pll_register(dev, limit_match);
-		if (!pll_lim->reg)
-			return -ENOENT;
-	}
-
-	if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
-		uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
-
-		pll_lim->vco1.minfreq = ROM32(pll_rec[0]);
-		pll_lim->vco1.maxfreq = ROM32(pll_rec[4]);
-		pll_lim->vco2.minfreq = ROM32(pll_rec[8]);
-		pll_lim->vco2.maxfreq = ROM32(pll_rec[12]);
-		pll_lim->vco1.min_inputfreq = ROM32(pll_rec[16]);
-		pll_lim->vco2.min_inputfreq = ROM32(pll_rec[20]);
-		pll_lim->vco1.max_inputfreq = pll_lim->vco2.max_inputfreq = INT_MAX;
-
-		/* these values taken from nv30/31/36 */
-		pll_lim->vco1.min_n = 0x1;
-		if (cv == 0x36)
-			pll_lim->vco1.min_n = 0x5;
-		pll_lim->vco1.max_n = 0xff;
-		pll_lim->vco1.min_m = 0x1;
-		pll_lim->vco1.max_m = 0xd;
-		pll_lim->vco2.min_n = 0x4;
-		/*
-		 * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this
-		 * table version (apart from nv35)), N2 is compared to
-		 * maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and
-		 * save a comparison
-		 */
-		pll_lim->vco2.max_n = 0x28;
-		if (cv == 0x30 || cv == 0x35)
-			/* only 5 bits available for N2 on nv30/35 */
-			pll_lim->vco2.max_n = 0x1f;
-		pll_lim->vco2.min_m = 0x1;
-		pll_lim->vco2.max_m = 0x4;
-		pll_lim->max_log2p = 0x7;
-		pll_lim->max_usable_log2p = 0x6;
-	} else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
-		uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
-		uint8_t *pll_rec;
-		int i;
-
-		/*
-		 * First entry is default match, if nothing better. warn if
-		 * reg field nonzero
-		 */
-		if (ROM32(bios->data[plloffs]))
-			NV_WARN(dev, "Default PLL limit entry has non-zero "
-				      "register field\n");
-
-		for (i = 1; i < entries; i++)
-			if (ROM32(bios->data[plloffs + recordlen * i]) == pll_lim->reg) {
-				pllindex = i;
-				break;
-			}
-
-		if ((dev_priv->card_type >= NV_50) && (pllindex == 0)) {
-			NV_ERROR(dev, "Register 0x%08x not found in PLL "
-				      "limits table", pll_lim->reg);
-			return -ENOENT;
-		}
-
-		pll_rec = &bios->data[plloffs + recordlen * pllindex];
-
-		BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
-			pllindex ? pll_lim->reg : 0);
-
-		/*
-		 * Frequencies are stored in tables in MHz, kHz are more
-		 * useful, so we convert.
-		 */
-
-		/* What output frequencies can each VCO generate? */
-		pll_lim->vco1.minfreq = ROM16(pll_rec[4]) * 1000;
-		pll_lim->vco1.maxfreq = ROM16(pll_rec[6]) * 1000;
-		pll_lim->vco2.minfreq = ROM16(pll_rec[8]) * 1000;
-		pll_lim->vco2.maxfreq = ROM16(pll_rec[10]) * 1000;
-
-		/* What input frequencies they accept (past the m-divider)? */
-		pll_lim->vco1.min_inputfreq = ROM16(pll_rec[12]) * 1000;
-		pll_lim->vco2.min_inputfreq = ROM16(pll_rec[14]) * 1000;
-		pll_lim->vco1.max_inputfreq = ROM16(pll_rec[16]) * 1000;
-		pll_lim->vco2.max_inputfreq = ROM16(pll_rec[18]) * 1000;
-
-		/* What values are accepted as multiplier and divider? */
-		pll_lim->vco1.min_n = pll_rec[20];
-		pll_lim->vco1.max_n = pll_rec[21];
-		pll_lim->vco1.min_m = pll_rec[22];
-		pll_lim->vco1.max_m = pll_rec[23];
-		pll_lim->vco2.min_n = pll_rec[24];
-		pll_lim->vco2.max_n = pll_rec[25];
-		pll_lim->vco2.min_m = pll_rec[26];
-		pll_lim->vco2.max_m = pll_rec[27];
-
-		pll_lim->max_usable_log2p = pll_lim->max_log2p = pll_rec[29];
-		if (pll_lim->max_log2p > 0x7)
-			/* pll decoding in nv_hw.c assumes never > 7 */
-			NV_WARN(dev, "Max log2 P value greater than 7 (%d)\n",
-				pll_lim->max_log2p);
-		if (cv < 0x60)
-			pll_lim->max_usable_log2p = 0x6;
-		pll_lim->log2p_bias = pll_rec[30];
-
-		if (recordlen > 0x22)
-			pll_lim->refclk = ROM32(pll_rec[31]);
-
-		if (recordlen > 0x23 && pll_rec[35])
-			NV_WARN(dev,
-				"Bits set in PLL configuration byte (%x)\n",
-				pll_rec[35]);
-
-		/* C51 special not seen elsewhere */
-		if (cv == 0x51 && !pll_lim->refclk) {
-			uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
-
-			if ((pll_lim->reg == NV_PRAMDAC_VPLL_COEFF && sel_clk & 0x20) ||
-			    (pll_lim->reg == NV_RAMDAC_VPLL2 && sel_clk & 0x80)) {
-				if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
-					pll_lim->refclk = 200000;
-				else
-					pll_lim->refclk = 25000;
-			}
-		}
-	} else if (pll_lim_ver == 0x30) { /* ver 0x30 */
-		uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
-		uint8_t *record = NULL;
-		int i;
-
-		BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
-			pll_lim->reg);
-
-		for (i = 0; i < entries; i++, entry += recordlen) {
-			if (ROM32(entry[3]) == pll_lim->reg) {
-				record = &bios->data[ROM16(entry[1])];
-				break;
-			}
-		}
-
-		if (!record) {
-			NV_ERROR(dev, "Register 0x%08x not found in PLL "
-				      "limits table", pll_lim->reg);
-			return -ENOENT;
-		}
-
-		pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
-		pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
-		pll_lim->vco2.minfreq = ROM16(record[4]) * 1000;
-		pll_lim->vco2.maxfreq = ROM16(record[6]) * 1000;
-		pll_lim->vco1.min_inputfreq = ROM16(record[8]) * 1000;
-		pll_lim->vco2.min_inputfreq = ROM16(record[10]) * 1000;
-		pll_lim->vco1.max_inputfreq = ROM16(record[12]) * 1000;
-		pll_lim->vco2.max_inputfreq = ROM16(record[14]) * 1000;
-		pll_lim->vco1.min_n = record[16];
-		pll_lim->vco1.max_n = record[17];
-		pll_lim->vco1.min_m = record[18];
-		pll_lim->vco1.max_m = record[19];
-		pll_lim->vco2.min_n = record[20];
-		pll_lim->vco2.max_n = record[21];
-		pll_lim->vco2.min_m = record[22];
-		pll_lim->vco2.max_m = record[23];
-		pll_lim->max_usable_log2p = pll_lim->max_log2p = record[25];
-		pll_lim->log2p_bias = record[27];
-		pll_lim->refclk = ROM32(record[28]);
-	} else if (pll_lim_ver) { /* ver 0x40 */
-		uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
-		uint8_t *record = NULL;
-		int i;
-
-		BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
-			pll_lim->reg);
-
-		for (i = 0; i < entries; i++, entry += recordlen) {
-			if (ROM32(entry[3]) == pll_lim->reg) {
-				record = &bios->data[ROM16(entry[1])];
-				break;
-			}
-		}
-
-		if (!record) {
-			NV_ERROR(dev, "Register 0x%08x not found in PLL "
-				      "limits table", pll_lim->reg);
-			return -ENOENT;
-		}
-
-		pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
-		pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
-		pll_lim->vco1.min_inputfreq = ROM16(record[4]) * 1000;
-		pll_lim->vco1.max_inputfreq = ROM16(record[6]) * 1000;
-		pll_lim->vco1.min_m = record[8];
-		pll_lim->vco1.max_m = record[9];
-		pll_lim->vco1.min_n = record[10];
-		pll_lim->vco1.max_n = record[11];
-		pll_lim->min_p = record[12];
-		pll_lim->max_p = record[13];
-		pll_lim->refclk = ROM16(entry[9]) * 1000;
-	}
-
-	/*
-	 * By now any valid limit table ought to have set a max frequency for
-	 * vco1, so if it's zero it's either a pre limit table bios, or one
-	 * with an empty limit table (seen on nv18)
-	 */
-	if (!pll_lim->vco1.maxfreq) {
-		pll_lim->vco1.minfreq = bios->fminvco;
-		pll_lim->vco1.maxfreq = bios->fmaxvco;
-		pll_lim->vco1.min_inputfreq = 0;
-		pll_lim->vco1.max_inputfreq = INT_MAX;
-		pll_lim->vco1.min_n = 0x1;
-		pll_lim->vco1.max_n = 0xff;
-		pll_lim->vco1.min_m = 0x1;
-		if (crystal_straps == 0) {
-			/* nv05 does this, nv11 doesn't, nv10 unknown */
-			if (cv < 0x11)
-				pll_lim->vco1.min_m = 0x7;
-			pll_lim->vco1.max_m = 0xd;
-		} else {
-			if (cv < 0x11)
-				pll_lim->vco1.min_m = 0x8;
-			pll_lim->vco1.max_m = 0xe;
-		}
-		if (cv < 0x17 || cv == 0x1a || cv == 0x20)
-			pll_lim->max_log2p = 4;
-		else
-			pll_lim->max_log2p = 5;
-		pll_lim->max_usable_log2p = pll_lim->max_log2p;
-	}
-
-	if (!pll_lim->refclk)
-		switch (crystal_straps) {
-		case 0:
-			pll_lim->refclk = 13500;
-			break;
-		case (1 << 6):
-			pll_lim->refclk = 14318;
-			break;
-		case (1 << 22):
-			pll_lim->refclk = 27000;
-			break;
-		case (1 << 22 | 1 << 6):
-			pll_lim->refclk = 25000;
-			break;
-		}
-
-	NV_DEBUG(dev, "pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
-	NV_DEBUG(dev, "pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
-	NV_DEBUG(dev, "pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
-	NV_DEBUG(dev, "pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
-	NV_DEBUG(dev, "pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
-	NV_DEBUG(dev, "pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
-	NV_DEBUG(dev, "pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
-	NV_DEBUG(dev, "pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
-	if (pll_lim->vco2.maxfreq) {
-		NV_DEBUG(dev, "pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
-		NV_DEBUG(dev, "pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
-		NV_DEBUG(dev, "pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
-		NV_DEBUG(dev, "pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
-		NV_DEBUG(dev, "pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
-		NV_DEBUG(dev, "pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
-		NV_DEBUG(dev, "pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
-		NV_DEBUG(dev, "pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
-	}
-	if (!pll_lim->max_p) {
-		NV_DEBUG(dev, "pll.max_log2p: %d\n", pll_lim->max_log2p);
-		NV_DEBUG(dev, "pll.log2p_bias: %d\n", pll_lim->log2p_bias);
-	} else {
-		NV_DEBUG(dev, "pll.min_p: %d\n", pll_lim->min_p);
-		NV_DEBUG(dev, "pll.max_p: %d\n", pll_lim->max_p);
-	}
-	NV_DEBUG(dev, "pll.refclk: %d\n", pll_lim->refclk);
-
-	return 0;
-}
-
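For reference, the v0x40 branch of the function removed above reduces to a small fixed-offset decode of the per-PLL record, with frequencies stored in MHz and converted to kHz. A sketch under those assumptions (the struct below is invented for illustration; the real code fills struct pll_lims directly):

```c
struct vco_lims {
	u32 minfreq, maxfreq, min_in, max_in;	/* all in kHz */
	u8  min_m, max_m, min_n, max_n;
};

/* Decode a version-0x40 PLL limits record at the offsets used by
 * the removed get_pll_limits() above. */
static void decode_pll40_record(u8 *rec, struct vco_lims *v)
{
	v->minfreq = ROM16(rec[0]) * 1000;	/* MHz -> kHz */
	v->maxfreq = ROM16(rec[2]) * 1000;
	v->min_in  = ROM16(rec[4]) * 1000;
	v->max_in  = ROM16(rec[6]) * 1000;
	v->min_m = rec[8];  v->max_m = rec[9];
	v->min_n = rec[10]; v->max_n = rec[11];
}
```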
 static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset)
 {
 	/*
@@ -4996,10 +886,11 @@ static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint
 	 * offset + 2  (8 bits): Chip version
 	 * offset + 3  (8 bits): Major version
 	 */
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	bios->major_version = bios->data[offset + 3];
 	bios->chip_version = bios->data[offset + 2];
-	NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
+	NV_INFO(drm, "Bios version %02x.%02x.%02x.%02x\n",
 		 bios->data[offset + 3], bios->data[offset + 2],
 		 bios->data[offset + 1], bios->data[offset]);
 }
@@ -5035,25 +926,26 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
 	 * offset + 0  (16 bits): loadval table pointer
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint16_t load_table_ptr;
 	uint8_t version, headerlen, entrylen, num_entries;
 
 	if (bitentry->length != 3) {
-		NV_ERROR(dev, "Do not understand BIT A table\n");
+		NV_ERROR(drm, "Do not understand BIT A table\n");
 		return -EINVAL;
 	}
 
 	load_table_ptr = ROM16(bios->data[bitentry->offset]);
 
 	if (load_table_ptr == 0x0) {
-		NV_DEBUG(dev, "Pointer to BIT loadval table invalid\n");
+		NV_DEBUG(drm, "Pointer to BIT loadval table invalid\n");
 		return -EINVAL;
 	}
 
 	version = bios->data[load_table_ptr];
 
 	if (version != 0x10) {
-		NV_ERROR(dev, "BIT loadval table version %d.%d not supported\n",
+		NV_ERROR(drm, "BIT loadval table version %d.%d not supported\n",
 			 version >> 4, version & 0xF);
 		return -ENOSYS;
 	}
@@ -5063,7 +955,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
 	num_entries = bios->data[load_table_ptr + 3];
 
 	if (headerlen != 4 || entrylen != 4 || num_entries != 2) {
-		NV_ERROR(dev, "Do not understand BIT loadval table\n");
+		NV_ERROR(drm, "Do not understand BIT loadval table\n");
 		return -EINVAL;
 	}
 
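Both hunks above show the same defensive idiom: before trusting a BIOS table, the parser checks that the header advertises exactly the layout it was written against. Generalised as a sketch (the helper and its parameters are illustrative, not driver API):

```c
/* Reject a BIOS table whose header length, record length or entry
 * count differs from what the parser expects; offsets 1..3 follow
 * the common BIT sub-table header used above. */
static int check_table_layout(u8 *tbl, u8 want_hdr, u8 want_rec,
			      u8 want_entries)
{
	if (tbl[1] != want_hdr || tbl[2] != want_rec ||
	    tbl[3] != want_entries)
		return -EINVAL;	/* unknown layout, do not parse */
	return 0;
}
```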
@@ -5080,9 +972,10 @@ static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
 	 *
 	 * There's more in here, but that's unknown.
 	 */
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (bitentry->length < 10) {
-		NV_ERROR(dev, "Do not understand BIT C table\n");
+		NV_ERROR(drm, "Do not understand BIT C table\n");
 		return -EINVAL;
 	}
 
@@ -5101,9 +994,10 @@ static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bi
 	 *   records beginning with a freq.
 	 * offset + 2  (16 bits): mode table pointer
 	 */
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (bitentry->length != 4) {
-		NV_ERROR(dev, "Do not understand BIT display table\n");
+		NV_ERROR(drm, "Do not understand BIT display table\n");
 		return -EINVAL;
 	}
 
@@ -5119,9 +1013,10 @@ static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	 *
 	 * See parse_script_table_pointers for layout
 	 */
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (bitentry->length < 14) {
-		NV_ERROR(dev, "Do not understand init table\n");
+		NV_ERROR(drm, "Do not understand init table\n");
 		return -EINVAL;
 	}
 
@@ -5148,11 +1043,12 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
 	 * There's other things in the table, purpose unknown
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint16_t daccmpoffset;
 	uint8_t dacver, dacheaderlen;
 
 	if (bitentry->length < 6) {
-		NV_ERROR(dev, "BIT i table too short for needed information\n");
+		NV_ERROR(drm, "BIT i table too short for needed information\n");
 		return -EINVAL;
 	}
 
@@ -5166,7 +1062,7 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
 	bios->is_mobile = bios->feature_byte & FEATURE_MOBILE;
 
 	if (bitentry->length < 15) {
-		NV_WARN(dev, "BIT i table not long enough for DAC load "
+		NV_WARN(drm, "BIT i table not long enough for DAC load "
 			"detection comparison table\n");
 		return -EINVAL;
 	}
@@ -5187,7 +1083,7 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
 	dacheaderlen = bios->data[daccmpoffset + 1];
 
 	if (dacver != 0x00 && dacver != 0x10) {
-		NV_WARN(dev, "DAC load detection comparison table version "
+		NV_WARN(drm, "DAC load detection comparison table version "
 			"%d.%d not known\n", dacver >> 4, dacver & 0xf);
 		return -ENOSYS;
 	}
@@ -5207,8 +1103,10 @@ static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	 * offset + 0  (16 bits): LVDS strap xlate table pointer
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
 	if (bitentry->length != 2) {
-		NV_ERROR(dev, "Do not understand BIT LVDS table\n");
+		NV_ERROR(drm, "Do not understand BIT LVDS table\n");
 		return -EINVAL;
 	}
 
@@ -5278,20 +1176,21 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	 * "or" from the DCB.
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint16_t tmdstableptr, script1, script2;
 
 	if (bitentry->length != 2) {
-		NV_ERROR(dev, "Do not understand BIT TMDS table\n");
+		NV_ERROR(drm, "Do not understand BIT TMDS table\n");
 		return -EINVAL;
 	}
 
 	tmdstableptr = ROM16(bios->data[bitentry->offset]);
 	if (!tmdstableptr) {
-		NV_ERROR(dev, "Pointer to TMDS table invalid\n");
+		NV_ERROR(drm, "Pointer to TMDS table invalid\n");
 		return -EINVAL;
 	}
 
-	NV_INFO(dev, "TMDS table version %d.%d\n",
+	NV_INFO(drm, "TMDS table version %d.%d\n",
 		bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
 
 	/* nv50+ has v2.0, but we don't parse it atm */
@@ -5305,7 +1204,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	script1 = ROM16(bios->data[tmdstableptr + 7]);
 	script2 = ROM16(bios->data[tmdstableptr + 9]);
 	if (bios->data[script1] != 'q' || bios->data[script2] != 'q')
-		NV_WARN(dev, "TMDS table script pointers not stubbed\n");
+		NV_WARN(drm, "TMDS table script pointers not stubbed\n");
 
 	bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]);
 	bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]);
@@ -5325,10 +1224,11 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	 * offset + 0  (16 bits): output script table pointer
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint16_t outputscripttableptr;
 
 	if (bitentry->length != 3) {
-		NV_ERROR(dev, "Do not understand BIT U table\n");
+		NV_ERROR(drm, "Do not understand BIT U table\n");
 		return -EINVAL;
 	}
 
@@ -5347,8 +1247,8 @@ struct bit_table {
 int
 bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	u8 entries, *entry;
 
 	if (bios->type != NVBIOS_BIT)
@@ -5377,12 +1277,13 @@ parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
 		struct bit_table *table)
 {
 	struct drm_device *dev = bios->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct bit_entry bitentry;
 
 	if (bit_table(dev, table->id, &bitentry) == 0)
 		return table->parse_fn(dev, bios, &bitentry);
 
-	NV_INFO(dev, "BIT table '%c' not found\n", table->id);
+	NV_INFO(drm, "BIT table '%c' not found\n", table->id);
 	return -ENOSYS;
 }
 
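bit_table() itself is a linear scan of the BIT entry list for a one-byte table id. The sketch below assumes the common 6-byte entry layout (id, version, 16-bit length, 16-bit offset) implied by how struct bit_entry is filled; treat the field positions as an assumption rather than a specification.

```c
struct bit_entry_sketch { u8 id, version; u16 length, offset; };

/* Assumed 6-byte BIT entries; returns 0 and fills *e on a match. */
static int find_bit_entry(u8 *bit, u8 entries, u8 id,
			  struct bit_entry_sketch *e)
{
	int i;

	for (i = 0; i < entries; i++, bit += 6) {
		if (bit[0] == id) {
			e->id      = bit[0];
			e->version = bit[1];
			e->length  = ROM16(bit[2]);
			e->offset  = ROM16(bit[4]);
			return 0;
		}
	}
	return -ENOENT;
}
```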
@@ -5462,6 +1363,7 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
 	 * offset + 156: minimum pixel clock for LVDS dual link
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor;
 	uint16_t bmplength;
 	uint16_t legacy_scripts_offset, legacy_i2c_offset;
@@ -5475,7 +1377,7 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
 	bmp_version_major = bmp[5];
 	bmp_version_minor = bmp[6];
 
-	NV_TRACE(dev, "BMP version %d.%d\n",
+	NV_INFO(drm, "BMP version %d.%d\n",
 		 bmp_version_major, bmp_version_minor);
 
 	/*
@@ -5491,7 +1393,7 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
 	 * happened instead.
 	 */
 	if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) {
-		NV_ERROR(dev, "You have an unsupported BMP version. "
+		NV_ERROR(drm, "You have an unsupported BMP version. "
 			"Please send in your bios\n");
 		return -ENOSYS;
 	}
@@ -5540,7 +1442,7 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
 
 	/* checksum */
 	if (nv_cksum(bmp, 8)) {
-		NV_ERROR(dev, "Bad BMP checksum\n");
+		NV_ERROR(drm, "Bad BMP checksum\n");
 		return -EINVAL;
 	}
 
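The checksum here is the classic BIOS byte-sum: the first 8 bytes of the BMP header must sum to zero modulo 256, so any nonzero result signals corruption. A self-contained equivalent of what nv_cksum() computes:

```c
/* Returns nonzero if the byte-sum checksum fails; a valid header
 * region sums to 0 (mod 256). */
static int bmp_cksum_bad(const u8 *data, unsigned int len)
{
	u8 sum = 0;

	while (len--)
		sum += *data++;
	return sum != 0;
}
```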
@@ -5625,20 +1527,20 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
 }
 
 void *
-dcb_table(struct drm_device *dev)
+olddcb_table(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u8 *dcb = NULL;
 
-	if (dev_priv->card_type > NV_04)
-		dcb = ROMPTR(dev, dev_priv->vbios.data[0x36]);
+	if (nv_device(drm->device)->card_type > NV_04)
+		dcb = ROMPTR(dev, drm->vbios.data[0x36]);
 	if (!dcb) {
-		NV_WARNONCE(dev, "No DCB data found in VBIOS\n");
+		NV_WARN(drm, "No DCB data found in VBIOS\n");
 		return NULL;
 	}
 
 	if (dcb[0] >= 0x41) {
-		NV_WARNONCE(dev, "DCB version 0x%02x unknown\n", dcb[0]);
+		NV_WARN(drm, "DCB version 0x%02x unknown\n", dcb[0]);
 		return NULL;
 	} else
 	if (dcb[0] >= 0x30) {
@@ -5670,18 +1572,18 @@ dcb_table(struct drm_device *dev)
 	 *
 	 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
 	 */
-	NV_WARNONCE(dev, "No useful DCB data in VBIOS\n");
+	NV_WARN(drm, "No useful DCB data in VBIOS\n");
 	return NULL;
 	}
 
-	NV_WARNONCE(dev, "DCB header validation failed\n");
+	NV_WARN(drm, "DCB header validation failed\n");
 	return NULL;
 }
 
 void *
-dcb_outp(struct drm_device *dev, u8 idx)
+olddcb_outp(struct drm_device *dev, u8 idx)
 {
-	u8 *dcb = dcb_table(dev);
+	u8 *dcb = olddcb_table(dev);
 	if (dcb && dcb[0] >= 0x30) {
 		if (idx < dcb[2])
 			return dcb + dcb[1] + (idx * dcb[3]);
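The indexing arithmetic in olddcb_outp() is worth spelling out once: for DCB 3.0+, byte 1 of the table is the header length, byte 2 the entry count, and byte 3 the per-entry size, so entry idx lives at a fixed stride past the header. The same computation, isolated:

```c
/* dcb[1] = header length, dcb[2] = entry count, dcb[3] = entry size */
static u8 *dcb_entry_ptr(u8 *dcb, u8 idx)
{
	if (idx >= dcb[2])
		return NULL;			/* past the last entry */
	return dcb + dcb[1] + idx * dcb[3];	/* header, then records */
}
```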
@@ -5703,20 +1605,20 @@ dcb_outp(struct drm_device *dev, u8 idx)
 }
 
 int
-dcb_outp_foreach(struct drm_device *dev, void *data,
+olddcb_outp_foreach(struct drm_device *dev, void *data,
 		 int (*exec)(struct drm_device *, void *, int idx, u8 *outp))
 {
 	int ret, idx = -1;
 	u8 *outp = NULL;
-	while ((outp = dcb_outp(dev, ++idx))) {
+	while ((outp = olddcb_outp(dev, ++idx))) {
 		if (ROM32(outp[0]) == 0x00000000)
 			break; /* seen on an NV11 with DCB v1.5 */
 		if (ROM32(outp[0]) == 0xffffffff)
 			break; /* seen on an NV17 with DCB v2.0 */
 
-		if ((outp[0] & 0x0f) == OUTPUT_UNUSED)
+		if ((outp[0] & 0x0f) == DCB_OUTPUT_UNUSED)
 			continue;
-		if ((outp[0] & 0x0f) == OUTPUT_EOL)
+		if ((outp[0] & 0x0f) == DCB_OUTPUT_EOL)
 			break;
 
 		ret = exec(dev, data, idx, outp);
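The iteration contract here has three ways out: all-zero or all-ones words act as de facto list terminators (workarounds for specific broken BIOSes, as the comments note), an EOL type ends the table cleanly, and UNUSED entries are skipped. Abstracted into a sketch, with the accessor callbacks standing in for the driver's functions:

```c
/* Walk raw DCB entries, honouring the terminator/skip rules used by
 * olddcb_outp_foreach() above.  get() returns NULL when idx runs off
 * the table; exec() may abort the walk with a nonzero return. */
static int foreach_valid_outp(u8 *(*get)(int idx),
			      int (*exec)(int idx, u8 *outp))
{
	int ret, idx = -1;
	u8 *outp;

	while ((outp = get(++idx))) {
		u32 conn = ROM32(outp[0]);

		if (conn == 0x00000000 || conn == 0xffffffff)
			break;				/* stub entries */
		if ((outp[0] & 0x0f) == DCB_OUTPUT_UNUSED)
			continue;
		if ((outp[0] & 0x0f) == DCB_OUTPUT_EOL)
			break;
		ret = exec(idx, outp);
		if (ret)
			return ret;
	}
	return 0;
}
```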
@@ -5728,9 +1630,9 @@ dcb_outp_foreach(struct drm_device *dev, void *data,
 }
 
 u8 *
-dcb_conntab(struct drm_device *dev)
+olddcb_conntab(struct drm_device *dev)
 {
-	u8 *dcb = dcb_table(dev);
+	u8 *dcb = olddcb_table(dev);
 	if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) {
 		u8 *conntab = ROMPTR(dev, dcb[0x14]);
 		if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40)
@@ -5740,19 +1642,19 @@ dcb_conntab(struct drm_device *dev)
 }
 
 u8 *
-dcb_conn(struct drm_device *dev, u8 idx)
+olddcb_conn(struct drm_device *dev, u8 idx)
 {
-	u8 *conntab = dcb_conntab(dev);
+	u8 *conntab = olddcb_conntab(dev);
 	if (conntab && idx < conntab[2])
 		return conntab + conntab[1] + (idx * conntab[3]);
 	return NULL;
 }
 
-static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
+static struct dcb_output *new_dcb_entry(struct dcb_table *dcb)
 {
-	struct dcb_entry *entry = &dcb->entry[dcb->entries];
+	struct dcb_output *entry = &dcb->entry[dcb->entries];
 
-	memset(entry, 0, sizeof(struct dcb_entry));
+	memset(entry, 0, sizeof(struct dcb_output));
 	entry->index = dcb->entries++;
 
 	return entry;
@@ -5761,20 +1663,22 @@ static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
 static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
 				 int heads, int or)
 {
-	struct dcb_entry *entry = new_dcb_entry(dcb);
+	struct dcb_output *entry = new_dcb_entry(dcb);
 
 	entry->type = type;
 	entry->i2c_index = i2c;
 	entry->heads = heads;
-	if (type != OUTPUT_ANALOG)
+	if (type != DCB_OUTPUT_ANALOG)
 		entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
 	entry->or = or;
 }
 
 static bool
 parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
-		  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
+		  uint32_t conn, uint32_t conf, struct dcb_output *entry)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
 	entry->type = conn & 0xf;
 	entry->i2c_index = (conn >> 4) & 0xf;
 	entry->heads = (conn >> 8) & 0xf;
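The first statements of parse_dcb20_entry() unpack the DCB 2.0 connection dword into bitfields. Restated as a standalone helper for readers decoding DCB dumps by hand (other bits of the dword carry location/connector data not shown here):

```c
/* DCB 2.0 'conn' dword layout as read above. */
static void unpack_dcb20_conn(u32 conn, u8 *type, u8 *i2c, u8 *heads,
			      u8 *or)
{
	*type  =  conn        & 0xf;	/* bits  3:0   output type   */
	*i2c   = (conn >> 4)  & 0xf;	/* bits  7:4   i2c index     */
	*heads = (conn >> 8)  & 0xf;	/* bits 11:8   head mask     */
	*or    = (conn >> 24) & 0xf;	/* bits 27:24  output route  */
}
```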
@@ -5784,7 +1688,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 	entry->or = (conn >> 24) & 0xf;
 
 	switch (entry->type) {
-	case OUTPUT_ANALOG:
+	case DCB_OUTPUT_ANALOG:
 		/*
 		 * Although the rest of a CRT conf dword is usually
 		 * zeros, mac biosen have stuff there so we must mask
@@ -5793,7 +1697,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 				(conf & 0xffff) * 10 :
 				(conf & 0xff) * 10000;
 		break;
-	case OUTPUT_LVDS:
+	case DCB_OUTPUT_LVDS:
 		{
 		uint32_t mask;
 		if (conf & 0x1)
@@ -5828,12 +1732,12 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 			if (dcb->version >= 0x40)
 				break;
 
-			NV_ERROR(dev, "Unknown LVDS configuration bits, "
+			NV_ERROR(drm, "Unknown LVDS configuration bits, "
 				      "please report\n");
 		}
 		break;
 		}
-	case OUTPUT_TV:
+	case DCB_OUTPUT_TV:
 	{
 		if (dcb->version >= 0x30)
 			entry->tvconf.has_component_output = conf & (0x8 << 4);
@@ -5842,7 +1746,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 
 		break;
 	}
-	case OUTPUT_DP:
+	case DCB_OUTPUT_DP:
 		entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
 		switch ((conf & 0x00e00000) >> 21) {
 		case 0:
@@ -5864,7 +1768,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 			break;
 		}
 		break;
-	case OUTPUT_TMDS:
+	case DCB_OUTPUT_TMDS:
 		if (dcb->version >= 0x40)
 			entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
 		else if (dcb->version >= 0x30)
@@ -5873,7 +1777,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 			entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
 
 		break;
-	case OUTPUT_EOL:
+	case DCB_OUTPUT_EOL:
 		/* weird g80 mobile type that "nv" treats as a terminator */
 		dcb->entries--;
 		return false;
@@ -5900,27 +1804,29 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 
 static bool
 parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
-		  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
+		  uint32_t conn, uint32_t conf, struct dcb_output *entry)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
 	switch (conn & 0x0000000f) {
 	case 0:
-		entry->type = OUTPUT_ANALOG;
+		entry->type = DCB_OUTPUT_ANALOG;
 		break;
 	case 1:
-		entry->type = OUTPUT_TV;
+		entry->type = DCB_OUTPUT_TV;
 		break;
 	case 2:
 	case 4:
 		if (conn & 0x10)
-			entry->type = OUTPUT_LVDS;
+			entry->type = DCB_OUTPUT_LVDS;
 		else
-			entry->type = OUTPUT_TMDS;
+			entry->type = DCB_OUTPUT_TMDS;
 		break;
 	case 3:
-		entry->type = OUTPUT_LVDS;
+		entry->type = DCB_OUTPUT_LVDS;
 		break;
 	default:
-		NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
+		NV_ERROR(drm, "Unknown DCB type %d\n", conn & 0x0000000f);
 		return false;
 	}
 
@@ -5932,13 +1838,13 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
 	entry->duallink_possible = false;
 
 	switch (entry->type) {
-	case OUTPUT_ANALOG:
+	case DCB_OUTPUT_ANALOG:
 		entry->crtconf.maxfreq = (conf & 0xffff) * 10;
 		break;
-	case OUTPUT_TV:
+	case DCB_OUTPUT_TV:
 		entry->tvconf.has_component_output = false;
 		break;
-	case OUTPUT_LVDS:
+	case DCB_OUTPUT_LVDS:
 		if ((conn & 0x00003f00) >> 8 != 0x10)
 			entry->lvdsconf.use_straps_for_mode = true;
 		entry->lvdsconf.use_power_scripts = true;
@@ -5959,14 +1865,15 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
 	 * more options
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i, newentries = 0;
 
 	for (i = 0; i < dcb->entries; i++) {
-		struct dcb_entry *ient = &dcb->entry[i];
+		struct dcb_output *ient = &dcb->entry[i];
 		int j;
 
 		for (j = i + 1; j < dcb->entries; j++) {
-			struct dcb_entry *jent = &dcb->entry[j];
+			struct dcb_output *jent = &dcb->entry[j];
 
 			if (jent->type == 100) /* already merged entry */
 				continue;
@@ -5976,7 +1883,7 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
 			    jent->type == ient->type &&
 			    jent->location == ient->location &&
 			    jent->or == ient->or) {
-				NV_TRACE(dev, "Merging DCB entries %d and %d\n",
+				NV_INFO(drm, "Merging DCB entries %d and %d\n",
 					 i, j);
 				ient->heads |= jent->heads;
 				jent->type = 100; /* dummy value */
@@ -6002,8 +1909,8 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
 static bool
 apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct dcb_table *dcb = &dev_priv->vbios.dcb;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_table *dcb = &drm->vbios.dcb;
 
 	/* Dell Precision M6300
 	 *   DCB entry 2: 02025312 00000010
@@ -6029,7 +1936,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 	 */
 	if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
 		if (*conn == 0xf2005014 && *conf == 0xffffffff) {
-			fabricate_dcb_output(dcb, OUTPUT_TMDS, 1, 1, 1);
+			fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
 			return false;
 		}
 	}
@@ -6115,24 +2022,24 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
 #ifdef __powerpc__
 	/* Apple iMac G4 NV17 */
 	if (of_machine_is_compatible("PowerMac4,5")) {
-		fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1);
-		fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2);
+		fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
+		fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
 		return;
 	}
 #endif
 
 	/* Make up some sane defaults */
-	fabricate_dcb_output(dcb, OUTPUT_ANALOG,
+	fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
 			     bios->legacy.i2c_indices.crt, 1, 1);
 
 	if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
-		fabricate_dcb_output(dcb, OUTPUT_TV,
+		fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
 				     bios->legacy.i2c_indices.tv,
 				     all_heads, 0);
 
 	else if (bios->tmds.output0_script_ptr ||
 		 bios->tmds.output1_script_ptr)
-		fabricate_dcb_output(dcb, OUTPUT_TMDS,
+		fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
 				     bios->legacy.i2c_indices.panel,
 				     all_heads, 1);
 }
@@ -6140,16 +2047,16 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
 static int
 parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct dcb_table *dcb = &dev_priv->vbios.dcb;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_table *dcb = &drm->vbios.dcb;
 	u32 conf = (dcb->version >= 0x20) ? ROM32(outp[4]) : ROM32(outp[6]);
 	u32 conn = ROM32(outp[0]);
 	bool ret;
 
 	if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) {
-		struct dcb_entry *entry = new_dcb_entry(dcb);
+		struct dcb_output *entry = new_dcb_entry(dcb);
 
-		NV_TRACEWARN(dev, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
+		NV_INFO(drm, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
 
 		if (dcb->version >= 0x20)
 			ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
@@ -6162,7 +2069,7 @@ parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
 		 * are cards with bogus values (nv31m in bug 23212),
 		 * and it's otherwise useless.
 		 */
-		if (entry->type == OUTPUT_TV &&
+		if (entry->type == DCB_OUTPUT_TV &&
 		    entry->location == DCB_LOC_ON_CHIP)
 			entry->i2c_index = 0x0f;
 	}
@@ -6210,7 +2117,7 @@ dcb_fake_connectors(struct nvbios *bios)
 	 * table - just in case it has random, rather than stub, entries.
 	 */
 	if (i > 1) {
-		u8 *conntab = dcb_conntab(bios->dev);
+		u8 *conntab = olddcb_conntab(bios->dev);
 		if (conntab)
 			conntab[0] = 0x00;
 	}
@@ -6219,11 +2126,12 @@ dcb_fake_connectors(struct nvbios *bios)
 static int
 parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct dcb_table *dcb = &bios->dcb;
 	u8 *dcbt, *conn;
 	int idx;
 
-	dcbt = dcb_table(dev);
+	dcbt = olddcb_table(dev);
 	if (!dcbt) {
 		/* handle pre-DCB boards */
 		if (bios->type == NVBIOS_BMP) {
@@ -6234,10 +2142,10 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 		return -EINVAL;
 	}
 
-	NV_TRACE(dev, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
+	NV_INFO(drm, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
 
 	dcb->version = dcbt[0];
-	dcb_outp_foreach(dev, NULL, parse_dcb_entry);
+	olddcb_outp_foreach(dev, NULL, parse_dcb_entry);
 
 	/*
 	 * apart for v2.1+ not being known for requiring merging, this
@@ -6251,10 +2159,10 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 
 	/* dump connector table entries to log, if any exist */
 	idx = -1;
-	while ((conn = dcb_conn(dev, ++idx))) {
+	while ((conn = olddcb_conn(dev, ++idx))) {
 		if (conn[0] != 0xff) {
-			NV_TRACE(dev, "DCB conn %02d: ", idx);
-			if (dcb_conntab(dev)[3] < 4)
+			NV_INFO(drm, "DCB conn %02d: ", idx);
+			if (olddcb_conntab(dev)[3] < 4)
 				printk("%04x\n", ROM16(conn[0]));
 			else
 				printk("%08x\n", ROM32(conn[0]));
@@ -6275,12 +2183,14 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
 	 * starting at reg 0x00001400
 	 */
 
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	uint8_t bytes_to_write;
 	uint16_t hwsq_entry_offset;
 	int i;
 
 	if (bios->data[hwsq_offset] <= entry) {
-		NV_ERROR(dev, "Too few entries in HW sequencer table for "
+		NV_ERROR(drm, "Too few entries in HW sequencer table for "
 			"requested entry\n");
 		return -ENOENT;
 	}
@@ -6288,24 +2198,24 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
 	bytes_to_write = bios->data[hwsq_offset + 1];
 
 	if (bytes_to_write != 36) {
-		NV_ERROR(dev, "Unknown HW sequencer entry size\n");
+		NV_ERROR(drm, "Unknown HW sequencer entry size\n");
 		return -EINVAL;
 	}
 
-	NV_TRACE(dev, "Loading NV17 power sequencing microcode\n");
+	NV_INFO(drm, "Loading NV17 power sequencing microcode\n");
 
 	hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;
 
 	/* set sequencer control */
-	bios_wr32(bios, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
+	nv_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
 	bytes_to_write -= 4;
 
 	/* write ucode */
 	for (i = 0; i < bytes_to_write; i += 4)
-		bios_wr32(bios, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
+		nv_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
 
 	/* twiddle NV_PBUS_DEBUG_4 */
-	bios_wr32(bios, NV_PBUS_DEBUG_4, bios_rd32(bios, NV_PBUS_DEBUG_4) | 0x18);
+	nv_wr32(device, NV_PBUS_DEBUG_4, nv_rd32(device, NV_PBUS_DEBUG_4) | 0x18);
 
 	return 0;
 }
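The upload above is a fixed-shape copy: the first little-endian dword of the entry programs the sequencer control register, and the remaining dwords are streamed to consecutive registers from 0x1400. A compact restatement, with mmio_wr32() as a stand-in accessor (not a nouveau function):

```c
/* Copy a 36-byte HWSQ entry: one control word to 0x1304, then the
 * remaining little-endian dwords to 0x1400, 0x1404, ... */
static void upload_hwsq(u8 *ucode, int nbytes,
			void (*mmio_wr32)(u32 reg, u32 val))
{
	int i;

	mmio_wr32(0x00001304, ROM32(ucode[0]));		/* control word */
	for (i = 4; i < nbytes; i += 4)			/* payload */
		mmio_wr32(0x00001400 + i - 4, ROM32(ucode[i]));
}
```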
@@ -6336,8 +2246,8 @@ static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
 
 uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	const uint8_t edid_sig[] = {
 			0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
 	uint16_t offset = 0;
@@ -6360,53 +2270,29 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
 			offset++;
 	}
 
-	NV_TRACE(dev, "Found EDID in BIOS\n");
+	NV_INFO(drm, "Found EDID in BIOS\n");
 
 	return bios->fp.edid = &bios->data[offset];
 }
 
-void
-nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
-			    struct dcb_entry *dcbent, int crtc)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	struct init_exec iexec = { true, false };
-
-	spin_lock_bh(&bios->lock);
-	bios->display.output = dcbent;
-	bios->display.crtc = crtc;
-	parse_init_table(bios, table, &iexec);
-	bios->display.output = NULL;
-	spin_unlock_bh(&bios->lock);
-}
-
-void
-nouveau_bios_init_exec(struct drm_device *dev, uint16_t table)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	struct init_exec iexec = { true, false };
-
-	parse_init_table(bios, table, &iexec);
-}
-
 static bool NVInitVBIOS(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 
 	memset(bios, 0, sizeof(struct nvbios));
 	spin_lock_init(&bios->lock);
 	bios->dev = dev;
 
-	return bios_shadow(dev);
+	bios->data = nouveau_bios(drm->device)->data;
+	bios->length = nouveau_bios(drm->device)->size;
+	return true;
 }
 
 static int nouveau_parse_vbios_struct(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
 	const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
 	int offset;
@@ -6414,7 +2300,7 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
 	offset = findstr(bios->data, bios->length,
 			 bit_signature, sizeof(bit_signature));
 	if (offset) {
-		NV_TRACE(dev, "BIT BIOS found\n");
+		NV_INFO(drm, "BIT BIOS found\n");
 		bios->type = NVBIOS_BIT;
 		bios->offset = offset;
 		return parse_bit_structure(bios, offset + 6);
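findstr() here is a plain signature sweep over the shadowed image, tried first for the BIT marker and then (next hunk) for the legacy BMP one. Its shape is roughly the following; returning 0 for "not found" works because offset 0 can never be a valid hit for these signatures:

```c
#include <linux/string.h>	/* memcmp */

/* Naive scan for a byte signature; returns its offset, or 0. */
static u16 find_signature(const u8 *data, int n, const u8 *sig, int len)
{
	int i;

	for (i = 0; i <= n - len; i++) {
		if (!memcmp(data + i, sig, len))
			return i;
	}
	return 0;
}
```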
@@ -6423,21 +2309,21 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
 	offset = findstr(bios->data, bios->length,
 			 bmp_signature, sizeof(bmp_signature));
 	if (offset) {
-		NV_TRACE(dev, "BMP BIOS found\n");
+		NV_INFO(drm, "BMP BIOS found\n");
 		bios->type = NVBIOS_BMP;
 		bios->offset = offset;
 		return parse_bmp_structure(dev, bios, offset);
 	}
 
-	NV_ERROR(dev, "No known BIOS signature found\n");
+	NV_ERROR(drm, "No known BIOS signature found\n");
 	return -ENODEV;
 }
 
 int
 nouveau_run_vbios_init(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvbios *bios = &drm->vbios;
 	int i, ret = 0;
 
 	/* Reset the BIOS head to 0. */
@@ -6451,23 +2337,8 @@ nouveau_run_vbios_init(struct drm_device *dev)
 		bios->fp.lvds_init_run = false;
 	}
 
-	parse_init_tables(bios);
-
-	/*
-	 * Runs some additional script seen on G8x VBIOSen. The VBIOS'
-	 * parser will run this right after the init tables, the binary
-	 * driver appears to run it at some point later.
-	 */
-	if (bios->some_script_ptr) {
-		struct init_exec iexec = {true, false};
-
-		NV_INFO(dev, "Parsing VBIOS init table at offset 0x%04X\n",
-			bios->some_script_ptr);
-		parse_init_table(bios, bios->some_script_ptr, &iexec);
-	}
-
-	if (dev_priv->card_type >= NV_50) {
-		for (i = 0; i < bios->dcb.entries; i++) {
+	if (nv_device(drm->device)->card_type >= NV_50) {
+		for (i = 0; bios->execute && i < bios->dcb.entries; i++) {
 			nouveau_bios_run_display_table(dev, 0, 0,
 						       &bios->dcb.entry[i], -1);
 		}
@@ -6479,10 +2350,10 @@ nouveau_run_vbios_init(struct drm_device *dev)
 static bool
 nouveau_bios_posted(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	unsigned htotal;
 
-	if (dev_priv->card_type >= NV_50) {
+	if (nv_device(drm->device)->card_type >= NV_50) {
 		if (NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
 		    NVReadVgaCrtc(dev, 0, 0x1a) == 0)
 			return false;
@@ -6501,8 +2372,8 @@ nouveau_bios_posted(struct drm_device *dev)
6501int 2372int
6502nouveau_bios_init(struct drm_device *dev) 2373nouveau_bios_init(struct drm_device *dev)
6503{ 2374{
6504 struct drm_nouveau_private *dev_priv = dev->dev_private; 2375 struct nouveau_drm *drm = nouveau_drm(dev);
6505 struct nvbios *bios = &dev_priv->vbios; 2376 struct nvbios *bios = &drm->vbios;
6506 int ret; 2377 int ret;
6507 2378
6508 if (!NVInitVBIOS(dev)) 2379 if (!NVInitVBIOS(dev))
@@ -6512,14 +2383,6 @@ nouveau_bios_init(struct drm_device *dev)
6512 if (ret) 2383 if (ret)
6513 return ret; 2384 return ret;
6514 2385
6515 ret = nouveau_i2c_init(dev);
6516 if (ret)
6517 return ret;
6518
6519 ret = nouveau_mxm_init(dev);
6520 if (ret)
6521 return ret;
6522
6523 ret = parse_dcb_table(dev, bios); 2386 ret = parse_dcb_table(dev, bios);
6524 if (ret) 2387 if (ret)
6525 return ret; 2388 return ret;
@@ -6532,12 +2395,10 @@ nouveau_bios_init(struct drm_device *dev)
6532 2395
6533 /* ... unless card isn't POSTed already */ 2396 /* ... unless card isn't POSTed already */
6534 if (!nouveau_bios_posted(dev)) { 2397 if (!nouveau_bios_posted(dev)) {
6535 NV_INFO(dev, "Adaptor not initialised, " 2398 NV_INFO(drm, "Adaptor not initialised, "
6536 "running VBIOS init tables.\n"); 2399 "running VBIOS init tables.\n");
6537 bios->execute = true; 2400 bios->execute = true;
6538 } 2401 }
6539 if (nouveau_force_post)
6540 bios->execute = true;
6541 2402
6542 ret = nouveau_run_vbios_init(dev); 2403 ret = nouveau_run_vbios_init(dev);
6543 if (ret) 2404 if (ret)
@@ -6560,10 +2421,4 @@ nouveau_bios_init(struct drm_device *dev)
6560void 2421void
6561nouveau_bios_takedown(struct drm_device *dev) 2422nouveau_bios_takedown(struct drm_device *dev)
6562{ 2423{
6563 struct drm_nouveau_private *dev_priv = dev->dev_private;
6564
6565 nouveau_mxm_fini(dev);
6566 nouveau_i2c_fini(dev);
6567
6568 kfree(dev_priv->vbios.data);
6569} 2424}
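
The conversion running through the hunks above is mechanical but worth spelling out: the drm_nouveau_private back-pointer is replaced by a nouveau_drm wrapper reached through an inline accessor, card_type moves behind the nv_device() cast, and the logging macros take the wrapper instead of the drm_device (NV_TRACE collapsing into NV_INFO). A minimal sketch of the accessor and the before/after idiom, assuming the definition in nouveau_drm.h matches its use here:

	static inline struct nouveau_drm *
	nouveau_drm(struct drm_device *dev)
	{
		/* dev_private now carries the nouveau_drm wrapper */
		return dev->dev_private;
	}

	/* before: generation checks went through drm_nouveau_private */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	if (dev_priv->card_type >= NV_50)
		run_nv50_path(dev);	/* hypothetical helper */

	/* after: the core device object owns card_type */
	struct nouveau_drm *drm = nouveau_drm(dev);
	if (nv_device(drm->device)->card_type >= NV_50)
		run_nv50_path(dev);	/* hypothetical helper */
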
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 298a3af48d14..3befbb821a56 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -21,11 +21,10 @@
21 * DEALINGS IN THE SOFTWARE. 21 * DEALINGS IN THE SOFTWARE.
22 */ 22 */
23 23
24#ifndef __NOUVEAU_BIOS_H__ 24#ifndef __NOUVEAU_DISPBIOS_H__
25#define __NOUVEAU_BIOS_H__ 25#define __NOUVEAU_DISPBIOS_H__
26 26
27#include "nvreg.h" 27#include "nvreg.h"
28#include "nouveau_i2c.h"
29 28
30#define DCB_MAX_NUM_ENTRIES 16 29#define DCB_MAX_NUM_ENTRIES 16
31#define DCB_MAX_NUM_I2C_ENTRIES 16 30#define DCB_MAX_NUM_I2C_ENTRIES 16
@@ -39,8 +38,8 @@
39#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); }) 38#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
40#define ROM64(x) le64_to_cpu(*(u64 *)&(x)) 39#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
41#define ROMPTR(d,x) ({ \ 40#define ROMPTR(d,x) ({ \
42 struct drm_nouveau_private *dev_priv = (d)->dev_private; \ 41 struct nouveau_drm *drm = nouveau_drm((d)); \
43 ROM16(x) ? &dev_priv->vbios.data[ROM16(x)] : NULL; \ 42 ROM16(x) ? &drm->vbios.data[ROM16(x)] : NULL; \
44}) 43})
45 44
46struct bit_entry { 45struct bit_entry {
@@ -53,95 +52,19 @@ struct bit_entry {
53 52
54int bit_table(struct drm_device *, u8 id, struct bit_entry *); 53int bit_table(struct drm_device *, u8 id, struct bit_entry *);
55 54
56enum dcb_gpio_tag { 55#include <subdev/bios/dcb.h>
57 DCB_GPIO_PANEL_POWER = 0x01, 56#include <subdev/bios/conn.h>
58 DCB_GPIO_TVDAC0 = 0x0c,
59 DCB_GPIO_TVDAC1 = 0x2d,
60 DCB_GPIO_PWM_FAN = 0x09,
61 DCB_GPIO_FAN_SENSE = 0x3d,
62 DCB_GPIO_UNUSED = 0xff
63};
64
65enum dcb_connector_type {
66 DCB_CONNECTOR_VGA = 0x00,
67 DCB_CONNECTOR_TV_0 = 0x10,
68 DCB_CONNECTOR_TV_1 = 0x11,
69 DCB_CONNECTOR_TV_3 = 0x13,
70 DCB_CONNECTOR_DVI_I = 0x30,
71 DCB_CONNECTOR_DVI_D = 0x31,
72 DCB_CONNECTOR_DMS59_0 = 0x38,
73 DCB_CONNECTOR_DMS59_1 = 0x39,
74 DCB_CONNECTOR_LVDS = 0x40,
75 DCB_CONNECTOR_LVDS_SPWG = 0x41,
76 DCB_CONNECTOR_DP = 0x46,
77 DCB_CONNECTOR_eDP = 0x47,
78 DCB_CONNECTOR_HDMI_0 = 0x60,
79 DCB_CONNECTOR_HDMI_1 = 0x61,
80 DCB_CONNECTOR_DMS59_DP0 = 0x64,
81 DCB_CONNECTOR_DMS59_DP1 = 0x65,
82 DCB_CONNECTOR_NONE = 0xff
83};
84
85enum dcb_type {
86 OUTPUT_ANALOG = 0,
87 OUTPUT_TV = 1,
88 OUTPUT_TMDS = 2,
89 OUTPUT_LVDS = 3,
90 OUTPUT_DP = 6,
91 OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
92 OUTPUT_UNUSED = 15,
93 OUTPUT_ANY = -1
94};
95
96struct dcb_entry {
97 int index; /* may not be raw dcb index if merging has happened */
98 enum dcb_type type;
99 uint8_t i2c_index;
100 uint8_t heads;
101 uint8_t connector;
102 uint8_t bus;
103 uint8_t location;
104 uint8_t or;
105 bool duallink_possible;
106 union {
107 struct sor_conf {
108 int link;
109 } sorconf;
110 struct {
111 int maxfreq;
112 } crtconf;
113 struct {
114 struct sor_conf sor;
115 bool use_straps_for_mode;
116 bool use_acpi_for_edid;
117 bool use_power_scripts;
118 } lvdsconf;
119 struct {
120 bool has_component_output;
121 } tvconf;
122 struct {
123 struct sor_conf sor;
124 int link_nr;
125 int link_bw;
126 } dpconf;
127 struct {
128 struct sor_conf sor;
129 int slave_addr;
130 } tmdsconf;
131 };
132 bool i2c_upper_default;
133};
134 57
135struct dcb_table { 58struct dcb_table {
136 uint8_t version; 59 uint8_t version;
137 int entries; 60 int entries;
138 struct dcb_entry entry[DCB_MAX_NUM_ENTRIES]; 61 struct dcb_output entry[DCB_MAX_NUM_ENTRIES];
139}; 62};
140 63
141enum nouveau_or { 64enum nouveau_or {
142 OUTPUT_A = (1 << 0), 65 DCB_OUTPUT_A = (1 << 0),
143 OUTPUT_B = (1 << 1), 66 DCB_OUTPUT_B = (1 << 1),
144 OUTPUT_C = (1 << 2) 67 DCB_OUTPUT_C = (1 << 2)
145}; 68};
146 69
147enum LVDS_script { 70enum LVDS_script {
@@ -154,58 +77,6 @@ enum LVDS_script {
154 LVDS_PANEL_OFF 77 LVDS_PANEL_OFF
155}; 78};
156 79
157/* these match types in pll limits table version 0x40,
158 * nouveau uses them on all chipsets internally where a
159 * specific pll needs to be referenced, but the exact
160 * register isn't known.
161 */
162enum pll_types {
163 PLL_CORE = 0x01,
164 PLL_SHADER = 0x02,
165 PLL_UNK03 = 0x03,
166 PLL_MEMORY = 0x04,
167 PLL_VDEC = 0x05,
168 PLL_UNK40 = 0x40,
169 PLL_UNK41 = 0x41,
170 PLL_UNK42 = 0x42,
171 PLL_VPLL0 = 0x80,
172 PLL_VPLL1 = 0x81,
173 PLL_MAX = 0xff
174};
175
176struct pll_lims {
177 u32 reg;
178
179 struct {
180 int minfreq;
181 int maxfreq;
182 int min_inputfreq;
183 int max_inputfreq;
184
185 uint8_t min_m;
186 uint8_t max_m;
187 uint8_t min_n;
188 uint8_t max_n;
189 } vco1, vco2;
190
191 uint8_t max_log2p;
192 /*
193 * for most pre nv50 cards setting a log2P of 7 (the common max_log2p
194 * value) is no different to 6 (at least for vplls) so allowing the MNP
195 * calc to use 7 causes the generated clock to be out by a factor of 2.
196 * however, max_log2p cannot be fixed-up during parsing as the
197 * unmodified max_log2p value is still needed for setting mplls, hence
198 * an additional max_usable_log2p member
199 */
200 uint8_t max_usable_log2p;
201 uint8_t log2p_bias;
202
203 uint8_t min_p;
204 uint8_t max_p;
205
206 int refclk;
207};
208
209struct nvbios { 80struct nvbios {
210 struct drm_device *dev; 81 struct drm_device *dev;
211 enum { 82 enum {
@@ -257,7 +128,7 @@ struct nvbios {
257 } state; 128 } state;
258 129
259 struct { 130 struct {
260 struct dcb_entry *output; 131 struct dcb_output *output;
261 int crtc; 132 int crtc;
262 uint16_t script_table_ptr; 133 uint16_t script_table_ptr;
263 } display; 134 } display;
@@ -302,11 +173,28 @@ struct nvbios {
302 } legacy; 173 } legacy;
303}; 174};
304 175
305void *dcb_table(struct drm_device *); 176void *olddcb_table(struct drm_device *);
306void *dcb_outp(struct drm_device *, u8 idx); 177void *olddcb_outp(struct drm_device *, u8 idx);
307int dcb_outp_foreach(struct drm_device *, void *data, 178int olddcb_outp_foreach(struct drm_device *, void *data,
308 int (*)(struct drm_device *, void *, int idx, u8 *outp)); 179 int (*)(struct drm_device *, void *, int idx, u8 *outp));
309u8 *dcb_conntab(struct drm_device *); 180u8 *olddcb_conntab(struct drm_device *);
310u8 *dcb_conn(struct drm_device *, u8 idx); 181u8 *olddcb_conn(struct drm_device *, u8 idx);
182
183int nouveau_bios_init(struct drm_device *);
184void nouveau_bios_takedown(struct drm_device *dev);
185int nouveau_run_vbios_init(struct drm_device *);
186struct dcb_connector_table_entry *
187nouveau_bios_connector_entry(struct drm_device *, int index);
188int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
189 struct dcb_output *, int crtc);
190bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
191uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
192int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
193 bool *dl, bool *if_is_24bit);
194int run_tmds_table(struct drm_device *, struct dcb_output *,
195 int head, int pxclk);
196int call_lvds_script(struct drm_device *, struct dcb_output *, int head,
197 enum LVDS_script, int pxclk);
198bool bios_encoder_match(struct dcb_output *, u32 hash);
311 199
312#endif 200#endif
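
With dcb_entry and the connector/output enums deleted, the drm-side cache stores the core's dcb_output structures, and the constants now come from <subdev/bios/dcb.h> and <subdev/bios/conn.h>. A sketch of walking the cached table under the new names, assuming DCB_OUTPUT_LVDS is the renamed OUTPUT_LVDS and setup_lvds_output() is a hypothetical consumer:

	struct nouveau_drm *drm = nouveau_drm(dev);
	struct dcb_table *dcb = &drm->vbios.dcb;
	int i;

	for (i = 0; i < dcb->entries; i++) {
		struct dcb_output *outp = &dcb->entry[i];

		if (outp->type == DCB_OUTPUT_LVDS)
			setup_lvds_output(dev, outp);	/* hypothetical */
	}
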
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4ee2e7ff92d2..259e5f1adf47 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -27,31 +27,127 @@
27 * Jeremy Kolb <jkolb@brandeis.edu> 27 * Jeremy Kolb <jkolb@brandeis.edu>
28 */ 28 */
29 29
30#include <drm/drmP.h> 30#include <core/engine.h>
31#include <drm/ttm/ttm_page_alloc.h>
32 31
33#include <drm/nouveau_drm.h> 32#include <subdev/fb.h>
34#include "nouveau_drv.h" 33#include <subdev/vm.h>
34#include <subdev/bar.h>
35
36#include "nouveau_drm.h"
35#include "nouveau_dma.h" 37#include "nouveau_dma.h"
36#include "nouveau_mm.h"
37#include "nouveau_vm.h"
38#include "nouveau_fence.h" 38#include "nouveau_fence.h"
39#include "nouveau_ramht.h"
40 39
41#include <linux/log2.h> 40#include "nouveau_bo.h"
42#include <linux/slab.h> 41#include "nouveau_ttm.h"
42#include "nouveau_gem.h"
43
44/*
45 * NV10-NV40 tiling helpers
46 */
47
48static void
49nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
50 u32 addr, u32 size, u32 pitch, u32 flags)
51{
52 struct nouveau_drm *drm = nouveau_drm(dev);
53 int i = reg - drm->tile.reg;
54 struct nouveau_fb *pfb = nouveau_fb(drm->device);
55 struct nouveau_fb_tile *tile = &pfb->tile.region[i];
56 struct nouveau_engine *engine;
57
58 nouveau_fence_unref(&reg->fence);
59
60 if (tile->pitch)
61 pfb->tile.fini(pfb, i, tile);
62
63 if (pitch)
64 pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
65
66 pfb->tile.prog(pfb, i, tile);
67
68 if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
69 engine->tile_prog(engine, i);
70 if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
71 engine->tile_prog(engine, i);
72}
73
74static struct nouveau_drm_tile *
75nv10_bo_get_tile_region(struct drm_device *dev, int i)
76{
77 struct nouveau_drm *drm = nouveau_drm(dev);
78 struct nouveau_drm_tile *tile = &drm->tile.reg[i];
79
80 spin_lock(&drm->tile.lock);
81
82 if (!tile->used &&
83 (!tile->fence || nouveau_fence_done(tile->fence)))
84 tile->used = true;
85 else
86 tile = NULL;
87
88 spin_unlock(&drm->tile.lock);
89 return tile;
90}
91
92static void
93nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
94 struct nouveau_fence *fence)
95{
96 struct nouveau_drm *drm = nouveau_drm(dev);
97
98 if (tile) {
99 spin_lock(&drm->tile.lock);
100 if (fence) {
101 /* Mark it as pending. */
102 tile->fence = fence;
103 nouveau_fence_ref(fence);
104 }
105
106 tile->used = false;
107 spin_unlock(&drm->tile.lock);
108 }
109}
110
111static struct nouveau_drm_tile *
112nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
113 u32 size, u32 pitch, u32 flags)
114{
115 struct nouveau_drm *drm = nouveau_drm(dev);
116 struct nouveau_fb *pfb = nouveau_fb(drm->device);
117 struct nouveau_drm_tile *tile, *found = NULL;
118 int i;
119
120 for (i = 0; i < pfb->tile.regions; i++) {
121 tile = nv10_bo_get_tile_region(dev, i);
122
123 if (pitch && !found) {
124 found = tile;
125 continue;
126
127 } else if (tile && pfb->tile.region[i].pitch) {
128 /* Kill an unused tile region. */
129 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
130 }
131
132 nv10_bo_put_tile_region(dev, tile, NULL);
133 }
134
135 if (found)
136 nv10_bo_update_tile_region(dev, found, addr, size,
137 pitch, flags);
138 return found;
139}
43 140
44static void 141static void
45nouveau_bo_del_ttm(struct ttm_buffer_object *bo) 142nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
46{ 143{
47 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 144 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
48 struct drm_device *dev = dev_priv->dev; 145 struct drm_device *dev = drm->dev;
49 struct nouveau_bo *nvbo = nouveau_bo(bo); 146 struct nouveau_bo *nvbo = nouveau_bo(bo);
50 147
51 if (unlikely(nvbo->gem)) 148 if (unlikely(nvbo->gem))
52 DRM_ERROR("bo %p still attached to GEM object\n", bo); 149 DRM_ERROR("bo %p still attached to GEM object\n", bo);
53 150 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
54 nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
55 kfree(nvbo); 151 kfree(nvbo);
56} 152}
57 153
@@ -59,23 +155,24 @@ static void
59nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, 155nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
60 int *align, int *size) 156 int *align, int *size)
61{ 157{
62 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 158 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
159 struct nouveau_device *device = nv_device(drm->device);
63 160
64 if (dev_priv->card_type < NV_50) { 161 if (device->card_type < NV_50) {
65 if (nvbo->tile_mode) { 162 if (nvbo->tile_mode) {
66 if (dev_priv->chipset >= 0x40) { 163 if (device->chipset >= 0x40) {
67 *align = 65536; 164 *align = 65536;
68 *size = roundup(*size, 64 * nvbo->tile_mode); 165 *size = roundup(*size, 64 * nvbo->tile_mode);
69 166
70 } else if (dev_priv->chipset >= 0x30) { 167 } else if (device->chipset >= 0x30) {
71 *align = 32768; 168 *align = 32768;
72 *size = roundup(*size, 64 * nvbo->tile_mode); 169 *size = roundup(*size, 64 * nvbo->tile_mode);
73 170
74 } else if (dev_priv->chipset >= 0x20) { 171 } else if (device->chipset >= 0x20) {
75 *align = 16384; 172 *align = 16384;
76 *size = roundup(*size, 64 * nvbo->tile_mode); 173 *size = roundup(*size, 64 * nvbo->tile_mode);
77 174
78 } else if (dev_priv->chipset >= 0x10) { 175 } else if (device->chipset >= 0x10) {
79 *align = 16384; 176 *align = 16384;
80 *size = roundup(*size, 32 * nvbo->tile_mode); 177 *size = roundup(*size, 32 * nvbo->tile_mode);
81 } 178 }
@@ -94,7 +191,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
94 struct sg_table *sg, 191 struct sg_table *sg,
95 struct nouveau_bo **pnvbo) 192 struct nouveau_bo **pnvbo)
96{ 193{
97 struct drm_nouveau_private *dev_priv = dev->dev_private; 194 struct nouveau_drm *drm = nouveau_drm(dev);
98 struct nouveau_bo *nvbo; 195 struct nouveau_bo *nvbo;
99 size_t acc_size; 196 size_t acc_size;
100 int ret; 197 int ret;
@@ -111,22 +208,22 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
111 INIT_LIST_HEAD(&nvbo->vma_list); 208 INIT_LIST_HEAD(&nvbo->vma_list);
112 nvbo->tile_mode = tile_mode; 209 nvbo->tile_mode = tile_mode;
113 nvbo->tile_flags = tile_flags; 210 nvbo->tile_flags = tile_flags;
114 nvbo->bo.bdev = &dev_priv->ttm.bdev; 211 nvbo->bo.bdev = &drm->ttm.bdev;
115 212
116 nvbo->page_shift = 12; 213 nvbo->page_shift = 12;
117 if (dev_priv->bar1_vm) { 214 if (drm->client.base.vm) {
118 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024) 215 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
119 nvbo->page_shift = dev_priv->bar1_vm->lpg_shift; 216 nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
120 } 217 }
121 218
122 nouveau_bo_fixup_align(nvbo, flags, &align, &size); 219 nouveau_bo_fixup_align(nvbo, flags, &align, &size);
123 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; 220 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
124 nouveau_bo_placement_set(nvbo, flags, 0); 221 nouveau_bo_placement_set(nvbo, flags, 0);
125 222
126 acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size, 223 acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
127 sizeof(struct nouveau_bo)); 224 sizeof(struct nouveau_bo));
128 225
129 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, 226 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
130 type, &nvbo->placement, 227 type, &nvbo->placement,
131 align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg, 228 align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
132 nouveau_bo_del_ttm); 229 nouveau_bo_del_ttm);
@@ -155,10 +252,11 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
155static void 252static void
156set_placement_range(struct nouveau_bo *nvbo, uint32_t type) 253set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
157{ 254{
158 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 255 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
159 int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; 256 struct nouveau_fb *pfb = nouveau_fb(drm->device);
257 u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;
160 258
161 if (dev_priv->card_type == NV_10 && 259 if (nv_device(drm->device)->card_type == NV_10 &&
162 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && 260 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
163 nvbo->bo.mem.num_pages < vram_pages / 4) { 261 nvbo->bo.mem.num_pages < vram_pages / 4) {
164 /* 262 /*
@@ -198,13 +296,12 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
198int 296int
199nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) 297nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
200{ 298{
201 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 299 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
202 struct ttm_buffer_object *bo = &nvbo->bo; 300 struct ttm_buffer_object *bo = &nvbo->bo;
203 int ret; 301 int ret;
204 302
205 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { 303 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
206 NV_ERROR(nouveau_bdev(bo->bdev)->dev, 304 NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
207 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
208 1 << bo->mem.mem_type, memtype); 305 1 << bo->mem.mem_type, memtype);
209 return -EINVAL; 306 return -EINVAL;
210 } 307 }
@@ -222,10 +319,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
222 if (ret == 0) { 319 if (ret == 0) {
223 switch (bo->mem.mem_type) { 320 switch (bo->mem.mem_type) {
224 case TTM_PL_VRAM: 321 case TTM_PL_VRAM:
225 dev_priv->fb_aper_free -= bo->mem.size; 322 drm->gem.vram_available -= bo->mem.size;
226 break; 323 break;
227 case TTM_PL_TT: 324 case TTM_PL_TT:
228 dev_priv->gart_info.aper_free -= bo->mem.size; 325 drm->gem.gart_available -= bo->mem.size;
229 break; 326 break;
230 default: 327 default:
231 break; 328 break;
@@ -241,7 +338,7 @@ out:
241int 338int
242nouveau_bo_unpin(struct nouveau_bo *nvbo) 339nouveau_bo_unpin(struct nouveau_bo *nvbo)
243{ 340{
244 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 341 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
245 struct ttm_buffer_object *bo = &nvbo->bo; 342 struct ttm_buffer_object *bo = &nvbo->bo;
246 int ret; 343 int ret;
247 344
@@ -258,10 +355,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
258 if (ret == 0) { 355 if (ret == 0) {
259 switch (bo->mem.mem_type) { 356 switch (bo->mem.mem_type) {
260 case TTM_PL_VRAM: 357 case TTM_PL_VRAM:
261 dev_priv->fb_aper_free += bo->mem.size; 358 drm->gem.vram_available += bo->mem.size;
262 break; 359 break;
263 case TTM_PL_TT: 360 case TTM_PL_TT:
264 dev_priv->gart_info.aper_free += bo->mem.size; 361 drm->gem.gart_available += bo->mem.size;
265 break; 362 break;
266 default: 363 default:
267 break; 364 break;
@@ -356,30 +453,18 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
356} 453}
357 454
358static struct ttm_tt * 455static struct ttm_tt *
359nouveau_ttm_tt_create(struct ttm_bo_device *bdev, 456nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
360 unsigned long size, uint32_t page_flags, 457 uint32_t page_flags, struct page *dummy_read)
361 struct page *dummy_read_page)
362{ 458{
363 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 459 struct nouveau_drm *drm = nouveau_bdev(bdev);
364 struct drm_device *dev = dev_priv->dev; 460 struct drm_device *dev = drm->dev;
365 461
366 switch (dev_priv->gart_info.type) { 462 if (drm->agp.stat == ENABLED) {
367#if __OS_HAS_AGP 463 return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
368 case NOUVEAU_GART_AGP: 464 page_flags, dummy_read);
369 return ttm_agp_tt_create(bdev, dev->agp->bridge,
370 size, page_flags, dummy_read_page);
371#endif
372 case NOUVEAU_GART_PDMA:
373 case NOUVEAU_GART_HW:
374 return nouveau_sgdma_create_ttm(bdev, size, page_flags,
375 dummy_read_page);
376 default:
377 NV_ERROR(dev, "Unknown GART type %d\n",
378 dev_priv->gart_info.type);
379 break;
380 } 465 }
381 466
382 return NULL; 467 return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
383} 468}
384 469
385static int 470static int
@@ -393,8 +478,7 @@ static int
393nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, 478nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
394 struct ttm_mem_type_manager *man) 479 struct ttm_mem_type_manager *man)
395{ 480{
396 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 481 struct nouveau_drm *drm = nouveau_bdev(bdev);
397 struct drm_device *dev = dev_priv->dev;
398 482
399 switch (type) { 483 switch (type) {
400 case TTM_PL_SYSTEM: 484 case TTM_PL_SYSTEM:
@@ -403,7 +487,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
403 man->default_caching = TTM_PL_FLAG_CACHED; 487 man->default_caching = TTM_PL_FLAG_CACHED;
404 break; 488 break;
405 case TTM_PL_VRAM: 489 case TTM_PL_VRAM:
406 if (dev_priv->card_type >= NV_50) { 490 if (nv_device(drm->device)->card_type >= NV_50) {
407 man->func = &nouveau_vram_manager; 491 man->func = &nouveau_vram_manager;
408 man->io_reserve_fastpath = false; 492 man->io_reserve_fastpath = false;
409 man->use_io_reserve_lru = true; 493 man->use_io_reserve_lru = true;
@@ -417,32 +501,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
417 man->default_caching = TTM_PL_FLAG_WC; 501 man->default_caching = TTM_PL_FLAG_WC;
418 break; 502 break;
419 case TTM_PL_TT: 503 case TTM_PL_TT:
420 if (dev_priv->card_type >= NV_50) 504 if (nv_device(drm->device)->card_type >= NV_50)
421 man->func = &nouveau_gart_manager; 505 man->func = &nouveau_gart_manager;
422 else 506 else
507 if (drm->agp.stat != ENABLED)
508 man->func = &nv04_gart_manager;
509 else
423 man->func = &ttm_bo_manager_func; 510 man->func = &ttm_bo_manager_func;
424 switch (dev_priv->gart_info.type) { 511
425 case NOUVEAU_GART_AGP: 512 if (drm->agp.stat == ENABLED) {
426 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; 513 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
427 man->available_caching = TTM_PL_FLAG_UNCACHED | 514 man->available_caching = TTM_PL_FLAG_UNCACHED |
428 TTM_PL_FLAG_WC; 515 TTM_PL_FLAG_WC;
429 man->default_caching = TTM_PL_FLAG_WC; 516 man->default_caching = TTM_PL_FLAG_WC;
430 break; 517 } else {
431 case NOUVEAU_GART_PDMA:
432 case NOUVEAU_GART_HW:
433 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | 518 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
434 TTM_MEMTYPE_FLAG_CMA; 519 TTM_MEMTYPE_FLAG_CMA;
435 man->available_caching = TTM_PL_MASK_CACHING; 520 man->available_caching = TTM_PL_MASK_CACHING;
436 man->default_caching = TTM_PL_FLAG_CACHED; 521 man->default_caching = TTM_PL_FLAG_CACHED;
437 break;
438 default:
439 NV_ERROR(dev, "Unknown GART type: %d\n",
440 dev_priv->gart_info.type);
441 return -EINVAL;
442 } 522 }
523
443 break; 524 break;
444 default: 525 default:
445 NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
446 return -EINVAL; 526 return -EINVAL;
447 } 527 }
448 return 0; 528 return 0;
@@ -491,6 +571,18 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
491} 571}
492 572
493static int 573static int
574nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
575{
576 int ret = RING_SPACE(chan, 2);
577 if (ret == 0) {
578 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
579 OUT_RING (chan, handle);
580 FIRE_RING (chan);
581 }
582 return ret;
583}
584
585static int
494nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 586nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
495 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) 587 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
496{ 588{
@@ -676,20 +768,14 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
676static int 768static int
677nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) 769nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
678{ 770{
679 int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000, 771 int ret = RING_SPACE(chan, 6);
680 &chan->m2mf_ntfy);
681 if (ret == 0) { 772 if (ret == 0) {
682 ret = RING_SPACE(chan, 6); 773 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
683 if (ret == 0) { 774 OUT_RING (chan, handle);
684 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); 775 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
685 OUT_RING (chan, handle); 776 OUT_RING (chan, NvNotify0);
686 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3); 777 OUT_RING (chan, NvDmaFB);
687 OUT_RING (chan, NvNotify0); 778 OUT_RING (chan, NvDmaFB);
688 OUT_RING (chan, NvDmaFB);
689 OUT_RING (chan, NvDmaFB);
690 } else {
691 nouveau_ramht_remove(chan, NvNotify0);
692 }
693 } 779 }
694 780
695 return ret; 781 return ret;
@@ -788,16 +874,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
788static int 874static int
789nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) 875nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
790{ 876{
791 int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000, 877 int ret = RING_SPACE(chan, 4);
792 &chan->m2mf_ntfy);
793 if (ret == 0) { 878 if (ret == 0) {
794 ret = RING_SPACE(chan, 4); 879 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
795 if (ret == 0) { 880 OUT_RING (chan, handle);
796 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); 881 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
797 OUT_RING (chan, handle); 882 OUT_RING (chan, NvNotify0);
798 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
799 OUT_RING (chan, NvNotify0);
800 }
801 } 883 }
802 884
803 return ret; 885 return ret;
@@ -808,8 +890,8 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
808 struct nouveau_channel *chan, struct ttm_mem_reg *mem) 890 struct nouveau_channel *chan, struct ttm_mem_reg *mem)
809{ 891{
810 if (mem->mem_type == TTM_PL_TT) 892 if (mem->mem_type == TTM_PL_TT)
811 return chan->gart_handle; 893 return NvDmaTT;
812 return chan->vram_handle; 894 return NvDmaFB;
813} 895}
814 896
815static int 897static int
@@ -865,8 +947,9 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
865 struct nouveau_mem *node = mem->mm_node; 947 struct nouveau_mem *node = mem->mm_node;
866 int ret; 948 int ret;
867 949
868 ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT, 950 ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
869 node->page_shift, NV_MEM_ACCESS_RO, vma); 951 PAGE_SHIFT, node->page_shift,
952 NV_MEM_ACCESS_RW, vma);
870 if (ret) 953 if (ret)
871 return ret; 954 return ret;
872 955
@@ -883,19 +966,19 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
883 bool no_wait_reserve, bool no_wait_gpu, 966 bool no_wait_reserve, bool no_wait_gpu,
884 struct ttm_mem_reg *new_mem) 967 struct ttm_mem_reg *new_mem)
885{ 968{
886 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 969 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
887 struct nouveau_channel *chan = dev_priv->channel; 970 struct nouveau_channel *chan = drm->channel;
888 struct nouveau_bo *nvbo = nouveau_bo(bo); 971 struct nouveau_bo *nvbo = nouveau_bo(bo);
889 struct ttm_mem_reg *old_mem = &bo->mem; 972 struct ttm_mem_reg *old_mem = &bo->mem;
890 int ret; 973 int ret;
891 974
892 mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX); 975 mutex_lock(&chan->cli->mutex);
893 976
894 /* create temporary vmas for the transfer and attach them to the 977 /* create temporary vmas for the transfer and attach them to the
895 * old nouveau_mem node, these will get cleaned up after ttm has 978 * old nouveau_mem node, these will get cleaned up after ttm has
896 * destroyed the ttm_mem_reg 979 * destroyed the ttm_mem_reg
897 */ 980 */
898 if (dev_priv->card_type >= NV_50) { 981 if (nv_device(drm->device)->card_type >= NV_50) {
899 struct nouveau_mem *node = old_mem->mm_node; 982 struct nouveau_mem *node = old_mem->mm_node;
900 983
901 ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]); 984 ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
@@ -907,7 +990,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
907 goto out; 990 goto out;
908 } 991 }
909 992
910 ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem); 993 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
911 if (ret == 0) { 994 if (ret == 0) {
912 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, 995 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
913 no_wait_reserve, 996 no_wait_reserve,
@@ -915,14 +998,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
915 } 998 }
916 999
917out: 1000out:
918 mutex_unlock(&chan->mutex); 1001 mutex_unlock(&chan->cli->mutex);
919 return ret; 1002 return ret;
920} 1003}
921 1004
922void 1005void
923nouveau_bo_move_init(struct nouveau_channel *chan) 1006nouveau_bo_move_init(struct nouveau_drm *drm)
924{ 1007{
925 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
926 static const struct { 1008 static const struct {
927 const char *name; 1009 const char *name;
928 int engine; 1010 int engine;
@@ -932,7 +1014,8 @@ nouveau_bo_move_init(struct nouveau_channel *chan)
932 struct ttm_mem_reg *, struct ttm_mem_reg *); 1014 struct ttm_mem_reg *, struct ttm_mem_reg *);
933 int (*init)(struct nouveau_channel *, u32 handle); 1015 int (*init)(struct nouveau_channel *, u32 handle);
934 } _methods[] = { 1016 } _methods[] = {
935 { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, 1017 { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
1018 { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
936 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, 1019 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
937 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, 1020 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
938 { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init }, 1021 { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
@@ -947,19 +1030,34 @@ nouveau_bo_move_init(struct nouveau_channel *chan)
947 int ret; 1030 int ret;
948 1031
949 do { 1032 do {
1033 struct nouveau_object *object;
1034 struct nouveau_channel *chan;
950 u32 handle = (mthd->engine << 16) | mthd->oclass; 1035 u32 handle = (mthd->engine << 16) | mthd->oclass;
951 ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass); 1036
1037 if (mthd->init == nve0_bo_move_init)
1038 chan = drm->cechan;
1039 else
1040 chan = drm->channel;
1041 if (chan == NULL)
1042 continue;
1043
1044 ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
1045 mthd->oclass, NULL, 0, &object);
952 if (ret == 0) { 1046 if (ret == 0) {
953 ret = mthd->init(chan, handle); 1047 ret = mthd->init(chan, handle);
954 if (ret == 0) { 1048 if (ret) {
955 dev_priv->ttm.move = mthd->exec; 1049 nouveau_object_del(nv_object(drm),
956 name = mthd->name; 1050 chan->handle, handle);
957 break; 1051 continue;
958 } 1052 }
1053
1054 drm->ttm.move = mthd->exec;
1055 name = mthd->name;
1056 break;
959 } 1057 }
960 } while ((++mthd)->exec); 1058 } while ((++mthd)->exec);
961 1059
962 NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name); 1060 NV_INFO(drm, "MM: using %s for buffer copies\n", name);
963} 1061}
964 1062
965static int 1063static int
@@ -1044,7 +1142,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
1044 nouveau_vm_map(vma, new_mem->mm_node); 1142 nouveau_vm_map(vma, new_mem->mm_node);
1045 } else 1143 } else
1046 if (new_mem && new_mem->mem_type == TTM_PL_TT && 1144 if (new_mem && new_mem->mem_type == TTM_PL_TT &&
1047 nvbo->page_shift == vma->vm->spg_shift) { 1145 nvbo->page_shift == vma->vm->vmm->spg_shift) {
1048 if (((struct nouveau_mem *)new_mem->mm_node)->sg) 1146 if (((struct nouveau_mem *)new_mem->mm_node)->sg)
1049 nouveau_vm_map_sg_table(vma, 0, new_mem-> 1147 nouveau_vm_map_sg_table(vma, 0, new_mem->
1050 num_pages << PAGE_SHIFT, 1148 num_pages << PAGE_SHIFT,
@@ -1061,10 +1159,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
1061 1159
1062static int 1160static int
1063nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, 1161nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1064 struct nouveau_tile_reg **new_tile) 1162 struct nouveau_drm_tile **new_tile)
1065{ 1163{
1066 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 1164 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1067 struct drm_device *dev = dev_priv->dev; 1165 struct drm_device *dev = drm->dev;
1068 struct nouveau_bo *nvbo = nouveau_bo(bo); 1166 struct nouveau_bo *nvbo = nouveau_bo(bo);
1069 u64 offset = new_mem->start << PAGE_SHIFT; 1167 u64 offset = new_mem->start << PAGE_SHIFT;
1070 1168
@@ -1072,8 +1170,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1072 if (new_mem->mem_type != TTM_PL_VRAM) 1170 if (new_mem->mem_type != TTM_PL_VRAM)
1073 return 0; 1171 return 0;
1074 1172
1075 if (dev_priv->card_type >= NV_10) { 1173 if (nv_device(drm->device)->card_type >= NV_10) {
1076 *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size, 1174 *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
1077 nvbo->tile_mode, 1175 nvbo->tile_mode,
1078 nvbo->tile_flags); 1176 nvbo->tile_flags);
1079 } 1177 }
@@ -1083,13 +1181,13 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1083 1181
1084static void 1182static void
1085nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, 1183nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1086 struct nouveau_tile_reg *new_tile, 1184 struct nouveau_drm_tile *new_tile,
1087 struct nouveau_tile_reg **old_tile) 1185 struct nouveau_drm_tile **old_tile)
1088{ 1186{
1089 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 1187 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1090 struct drm_device *dev = dev_priv->dev; 1188 struct drm_device *dev = drm->dev;
1091 1189
1092 nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj); 1190 nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
1093 *old_tile = new_tile; 1191 *old_tile = new_tile;
1094} 1192}
1095 1193
@@ -1098,13 +1196,13 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1098 bool no_wait_reserve, bool no_wait_gpu, 1196 bool no_wait_reserve, bool no_wait_gpu,
1099 struct ttm_mem_reg *new_mem) 1197 struct ttm_mem_reg *new_mem)
1100{ 1198{
1101 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 1199 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1102 struct nouveau_bo *nvbo = nouveau_bo(bo); 1200 struct nouveau_bo *nvbo = nouveau_bo(bo);
1103 struct ttm_mem_reg *old_mem = &bo->mem; 1201 struct ttm_mem_reg *old_mem = &bo->mem;
1104 struct nouveau_tile_reg *new_tile = NULL; 1202 struct nouveau_drm_tile *new_tile = NULL;
1105 int ret = 0; 1203 int ret = 0;
1106 1204
1107 if (dev_priv->card_type < NV_50) { 1205 if (nv_device(drm->device)->card_type < NV_50) {
1108 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); 1206 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
1109 if (ret) 1207 if (ret)
1110 return ret; 1208 return ret;
@@ -1119,7 +1217,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1119 } 1217 }
1120 1218
1121 /* CPU copy if we have no accelerated method available */ 1219 /* CPU copy if we have no accelerated method available */
1122 if (!dev_priv->ttm.move) { 1220 if (!drm->ttm.move) {
1123 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1221 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
1124 goto out; 1222 goto out;
1125 } 1223 }
@@ -1139,7 +1237,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1139 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1237 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
1140 1238
1141out: 1239out:
1142 if (dev_priv->card_type < NV_50) { 1240 if (nv_device(drm->device)->card_type < NV_50) {
1143 if (ret) 1241 if (ret)
1144 nouveau_bo_vm_cleanup(bo, NULL, &new_tile); 1242 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1145 else 1243 else
@@ -1159,8 +1257,8 @@ static int
1159nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 1257nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1160{ 1258{
1161 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; 1259 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1162 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 1260 struct nouveau_drm *drm = nouveau_bdev(bdev);
1163 struct drm_device *dev = dev_priv->dev; 1261 struct drm_device *dev = drm->dev;
1164 int ret; 1262 int ret;
1165 1263
1166 mem->bus.addr = NULL; 1264 mem->bus.addr = NULL;
@@ -1176,48 +1274,28 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1176 return 0; 1274 return 0;
1177 case TTM_PL_TT: 1275 case TTM_PL_TT:
1178#if __OS_HAS_AGP 1276#if __OS_HAS_AGP
1179 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { 1277 if (drm->agp.stat == ENABLED) {
1180 mem->bus.offset = mem->start << PAGE_SHIFT; 1278 mem->bus.offset = mem->start << PAGE_SHIFT;
1181 mem->bus.base = dev_priv->gart_info.aper_base; 1279 mem->bus.base = drm->agp.base;
1182 mem->bus.is_iomem = true; 1280 mem->bus.is_iomem = true;
1183 } 1281 }
1184#endif 1282#endif
1185 break; 1283 break;
1186 case TTM_PL_VRAM: 1284 case TTM_PL_VRAM:
1187 { 1285 mem->bus.offset = mem->start << PAGE_SHIFT;
1188 struct nouveau_mem *node = mem->mm_node; 1286 mem->bus.base = pci_resource_start(dev->pdev, 1);
1189 u8 page_shift; 1287 mem->bus.is_iomem = true;
1190 1288 if (nv_device(drm->device)->card_type >= NV_50) {
1191 if (!dev_priv->bar1_vm) { 1289 struct nouveau_bar *bar = nouveau_bar(drm->device);
1192 mem->bus.offset = mem->start << PAGE_SHIFT; 1290 struct nouveau_mem *node = mem->mm_node;
1193 mem->bus.base = pci_resource_start(dev->pdev, 1);
1194 mem->bus.is_iomem = true;
1195 break;
1196 }
1197
1198 if (dev_priv->card_type >= NV_C0)
1199 page_shift = node->page_shift;
1200 else
1201 page_shift = 12;
1202 1291
1203 ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, 1292 ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
1204 page_shift, NV_MEM_ACCESS_RW, 1293 &node->bar_vma);
1205 &node->bar_vma); 1294 if (ret)
1206 if (ret) 1295 return ret;
1207 return ret;
1208 1296
1209 nouveau_vm_map(&node->bar_vma, node); 1297 mem->bus.offset = node->bar_vma.offset;
1210 if (ret) {
1211 nouveau_vm_put(&node->bar_vma);
1212 return ret;
1213 } 1298 }
1214
1215 mem->bus.offset = node->bar_vma.offset;
1216 if (dev_priv->card_type == NV_50) /*XXX*/
1217 mem->bus.offset -= 0x0020000000ULL;
1218 mem->bus.base = pci_resource_start(dev->pdev, 1);
1219 mem->bus.is_iomem = true;
1220 }
1221 break; 1299 break;
1222 default: 1300 default:
1223 return -EINVAL; 1301 return -EINVAL;
@@ -1228,41 +1306,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1228static void 1306static void
1229nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 1307nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1230{ 1308{
1231 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 1309 struct nouveau_drm *drm = nouveau_bdev(bdev);
1310 struct nouveau_bar *bar = nouveau_bar(drm->device);
1232 struct nouveau_mem *node = mem->mm_node; 1311 struct nouveau_mem *node = mem->mm_node;
1233 1312
1234 if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
1235 return;
1236
1237 if (!node->bar_vma.node) 1313 if (!node->bar_vma.node)
1238 return; 1314 return;
1239 1315
1240 nouveau_vm_unmap(&node->bar_vma); 1316 bar->unmap(bar, &node->bar_vma);
1241 nouveau_vm_put(&node->bar_vma);
1242} 1317}
1243 1318
1244static int 1319static int
1245nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) 1320nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1246{ 1321{
1247 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); 1322 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1248 struct nouveau_bo *nvbo = nouveau_bo(bo); 1323 struct nouveau_bo *nvbo = nouveau_bo(bo);
1324 struct nouveau_device *device = nv_device(drm->device);
1325 u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
1249 1326
1250 /* as long as the bo isn't in vram, and isn't tiled, we've got 1327 /* as long as the bo isn't in vram, and isn't tiled, we've got
1251 * nothing to do here. 1328 * nothing to do here.
1252 */ 1329 */
1253 if (bo->mem.mem_type != TTM_PL_VRAM) { 1330 if (bo->mem.mem_type != TTM_PL_VRAM) {
1254 if (dev_priv->card_type < NV_50 || 1331 if (nv_device(drm->device)->card_type < NV_50 ||
1255 !nouveau_bo_tile_layout(nvbo)) 1332 !nouveau_bo_tile_layout(nvbo))
1256 return 0; 1333 return 0;
1257 } 1334 }
1258 1335
1259 /* make sure bo is in mappable vram */ 1336 /* make sure bo is in mappable vram */
1260 if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages) 1337 if (bo->mem.start + bo->mem.num_pages < mappable)
1261 return 0; 1338 return 0;
1262 1339
1263 1340
1264 nvbo->placement.fpfn = 0; 1341 nvbo->placement.fpfn = 0;
1265 nvbo->placement.lpfn = dev_priv->fb_mappable_pages; 1342 nvbo->placement.lpfn = mappable;
1266 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); 1343 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1267 return nouveau_bo_validate(nvbo, false, true, false); 1344 return nouveau_bo_validate(nvbo, false, true, false);
1268} 1345}
@@ -1271,7 +1348,7 @@ static int
1271nouveau_ttm_tt_populate(struct ttm_tt *ttm) 1348nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1272{ 1349{
1273 struct ttm_dma_tt *ttm_dma = (void *)ttm; 1350 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1274 struct drm_nouveau_private *dev_priv; 1351 struct nouveau_drm *drm;
1275 struct drm_device *dev; 1352 struct drm_device *dev;
1276 unsigned i; 1353 unsigned i;
1277 int r; 1354 int r;
@@ -1288,11 +1365,11 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1288 return 0; 1365 return 0;
1289 } 1366 }
1290 1367
1291 dev_priv = nouveau_bdev(ttm->bdev); 1368 drm = nouveau_bdev(ttm->bdev);
1292 dev = dev_priv->dev; 1369 dev = drm->dev;
1293 1370
1294#if __OS_HAS_AGP 1371#if __OS_HAS_AGP
1295 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { 1372 if (drm->agp.stat == ENABLED) {
1296 return ttm_agp_tt_populate(ttm); 1373 return ttm_agp_tt_populate(ttm);
1297 } 1374 }
1298#endif 1375#endif
@@ -1329,7 +1406,7 @@ static void
1329nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) 1406nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1330{ 1407{
1331 struct ttm_dma_tt *ttm_dma = (void *)ttm; 1408 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1332 struct drm_nouveau_private *dev_priv; 1409 struct nouveau_drm *drm;
1333 struct drm_device *dev; 1410 struct drm_device *dev;
1334 unsigned i; 1411 unsigned i;
1335 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); 1412 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1337,11 +1414,11 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1337 if (slave) 1414 if (slave)
1338 return; 1415 return;
1339 1416
1340 dev_priv = nouveau_bdev(ttm->bdev); 1417 drm = nouveau_bdev(ttm->bdev);
1341 dev = dev_priv->dev; 1418 dev = drm->dev;
1342 1419
1343#if __OS_HAS_AGP 1420#if __OS_HAS_AGP
1344 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { 1421 if (drm->agp.stat == ENABLED) {
1345 ttm_agp_tt_unpopulate(ttm); 1422 ttm_agp_tt_unpopulate(ttm);
1346 return; 1423 return;
1347 } 1424 }
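
The imported NV10-NV40 tiling helpers form a claim/program/release cycle over the fb subdev's tile regions: nv10_bo_get_tile_region() claims a slot that is unused and whose last fence has signalled, nv10_bo_update_tile_region() programs it through pfb->tile and reprograms the GR/MPEG engines, and nv10_bo_put_tile_region() parks the slot behind a fence so it cannot be recycled while the GPU may still reference it. A hypothetical caller, under those assumptions:

	/* claim and program a tile region for a tiled VRAM allocation */
	struct nouveau_drm_tile *tile =
		nv10_bo_set_tiling(dev, addr, size, pitch, flags);

	/* ... submit GPU work that uses the surface, yielding `fence' ... */

	/* release; the slot becomes reusable once `fence' signals */
	nv10_bo_put_tile_region(dev, tile, fence);
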
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
new file mode 100644
index 000000000000..dec51b1098fe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -0,0 +1,99 @@
1#ifndef __NOUVEAU_BO_H__
2#define __NOUVEAU_BO_H__
3
4struct nouveau_channel;
5struct nouveau_fence;
6struct nouveau_vma;
7
8struct nouveau_bo {
9 struct ttm_buffer_object bo;
10 struct ttm_placement placement;
11 u32 valid_domains;
12 u32 placements[3];
13 u32 busy_placements[3];
14 struct ttm_bo_kmap_obj kmap;
15 struct list_head head;
16
17 /* protected by ttm_bo_reserve() */
18 struct drm_file *reserved_by;
19 struct list_head entry;
20 int pbbo_index;
21 bool validate_mapped;
22
23 struct list_head vma_list;
24 unsigned page_shift;
25
26 u32 tile_mode;
27 u32 tile_flags;
28 struct nouveau_drm_tile *tile;
29
30 struct drm_gem_object *gem;
31 int pin_refcnt;
32
33 struct ttm_bo_kmap_obj dma_buf_vmap;
34 int vmapping_count;
35};
36
37static inline struct nouveau_bo *
38nouveau_bo(struct ttm_buffer_object *bo)
39{
40 return container_of(bo, struct nouveau_bo, bo);
41}
42
43static inline int
44nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
45{
46 struct nouveau_bo *prev;
47
48 if (!pnvbo)
49 return -EINVAL;
50 prev = *pnvbo;
51
52 *pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
53 if (prev) {
54 struct ttm_buffer_object *bo = &prev->bo;
55
56 ttm_bo_unref(&bo);
57 }
58
59 return 0;
60}
61
62extern struct ttm_bo_driver nouveau_bo_driver;
63
64void nouveau_bo_move_init(struct nouveau_drm *);
65int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
66 u32 tile_mode, u32 tile_flags, struct sg_table *sg,
67 struct nouveau_bo **);
68int nouveau_bo_pin(struct nouveau_bo *, u32 flags);
69int nouveau_bo_unpin(struct nouveau_bo *);
70int nouveau_bo_map(struct nouveau_bo *);
71void nouveau_bo_unmap(struct nouveau_bo *);
72void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
73u16 nouveau_bo_rd16(struct nouveau_bo *, unsigned index);
74void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
75u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
76void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
77void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
78int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
79 bool no_wait_reserve, bool no_wait_gpu);
80
81struct nouveau_vma *
82nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
83
84int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
85 struct nouveau_vma *);
86void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
87
88/* TODO: submit equivalent to TTM generic API upstream? */
89static inline void __iomem *
90nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
91{
92 bool is_iomem;
93 void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
94 &nvbo->kmap, &is_iomem);
95 WARN_ON_ONCE(ioptr && !is_iomem);
96 return ioptr;
97}
98
99#endif
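
nouveau_bo_ref() above is a swap-style reference helper: it references `ref' (when non-NULL), drops whatever *pnvbo previously held, and leaves *pnvbo pointing at the new object, so passing NULL is the idiomatic way to release a buffer. A usage sketch against the prototypes above (placement flag assumed; error handling trimmed):

	struct nouveau_bo *nvbo = NULL;
	int ret;

	ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
			     0, 0, NULL, &nvbo);
	if (ret)
		return ret;

	/* ... map, pin, use ... */

	nouveau_bo_ref(NULL, &nvbo);	/* unref and NULL the pointer */
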
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
index 2c5eb5d8d556..6da576445b3d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -22,7 +22,9 @@
22 */ 22 */
23 23
24#include <drm/drmP.h> 24#include <drm/drmP.h>
25#include "nouveau_drv.h" 25
26#include "nouveau_drm.h"
27#include "nouveau_reg.h"
26#include "nouveau_hw.h" 28#include "nouveau_hw.h"
27 29
28/****************************************************************************\ 30/****************************************************************************\
@@ -195,12 +197,13 @@ static void
195nv04_update_arb(struct drm_device *dev, int VClk, int bpp, 197nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
196 int *burst, int *lwm) 198 int *burst, int *lwm)
197{ 199{
198 struct drm_nouveau_private *dev_priv = dev->dev_private; 200 struct nouveau_drm *drm = nouveau_drm(dev);
201 struct nouveau_device *device = nouveau_dev(dev);
199 struct nv_fifo_info fifo_data; 202 struct nv_fifo_info fifo_data;
200 struct nv_sim_state sim_data; 203 struct nv_sim_state sim_data;
201 int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY); 204 int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
202 int NVClk = nouveau_hw_get_clock(dev, PLL_CORE); 205 int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
203 uint32_t cfg1 = nvReadFB(dev, NV04_PFB_CFG1); 206 uint32_t cfg1 = nv_rd32(device, NV04_PFB_CFG1);
204 207
205 sim_data.pclk_khz = VClk; 208 sim_data.pclk_khz = VClk;
206 sim_data.mclk_khz = MClk; 209 sim_data.mclk_khz = MClk;
@@ -218,13 +221,13 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
218 sim_data.mem_latency = 3; 221 sim_data.mem_latency = 3;
219 sim_data.mem_page_miss = 10; 222 sim_data.mem_page_miss = 10;
220 } else { 223 } else {
221 sim_data.memory_type = nvReadFB(dev, NV04_PFB_CFG0) & 0x1; 224 sim_data.memory_type = nv_rd32(device, NV04_PFB_CFG0) & 0x1;
222 sim_data.memory_width = (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64; 225 sim_data.memory_width = (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
223 sim_data.mem_latency = cfg1 & 0xf; 226 sim_data.mem_latency = cfg1 & 0xf;
224 sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1); 227 sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
225 } 228 }
226 229
227 if (dev_priv->card_type == NV_04) 230 if (nv_device(drm->device)->card_type == NV_04)
228 nv04_calc_arb(&fifo_data, &sim_data); 231 nv04_calc_arb(&fifo_data, &sim_data);
229 else 232 else
230 nv10_calc_arb(&fifo_data, &sim_data); 233 nv10_calc_arb(&fifo_data, &sim_data);
@@ -249,9 +252,9 @@ nv20_update_arb(int *burst, int *lwm)
249void 252void
250nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm) 253nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
251{ 254{
252 struct drm_nouveau_private *dev_priv = dev->dev_private; 255 struct nouveau_drm *drm = nouveau_drm(dev);
253 256
254 if (dev_priv->card_type < NV_20) 257 if (nv_device(drm->device)->card_type < NV_20)
255 nv04_update_arb(dev, vclk, bpp, burst, lwm); 258 nv04_update_arb(dev, vclk, bpp, burst, lwm);
256 else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || 259 else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
257 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { 260 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
@@ -260,219 +263,3 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
260 } else 263 } else
261 nv20_update_arb(burst, lwm); 264 nv20_update_arb(burst, lwm);
262} 265}
263
264static int
265getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
266 struct nouveau_pll_vals *bestpv)
267{
268 /* Find M, N and P for a single stage PLL
269 *
270 * Note that some bioses (NV3x) have lookup tables of precomputed MNP
271 * values, but we're too lazy to use those atm
272 *
273 * "clk" parameter in kHz
274 * returns calculated clock
275 */
276 struct drm_nouveau_private *dev_priv = dev->dev_private;
277 int cv = dev_priv->vbios.chip_version;
278 int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
279 int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
280 int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
281 int minU = pll_lim->vco1.min_inputfreq;
282 int maxU = pll_lim->vco1.max_inputfreq;
283 int minP = pll_lim->max_p ? pll_lim->min_p : 0;
284 int maxP = pll_lim->max_p ? pll_lim->max_p : pll_lim->max_usable_log2p;
285 int crystal = pll_lim->refclk;
286 int M, N, thisP, P;
287 int clkP, calcclk;
288 int delta, bestdelta = INT_MAX;
289 int bestclk = 0;
290
291 /* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
292 /* possibly correlated with introduction of 27MHz crystal */
293 if (dev_priv->card_type < NV_50) {
294 if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
295 if (clk > 250000)
296 maxM = 6;
297 if (clk > 340000)
298 maxM = 2;
299 } else if (cv < 0x40) {
300 if (clk > 150000)
301 maxM = 6;
302 if (clk > 200000)
303 maxM = 4;
304 if (clk > 340000)
305 maxM = 2;
306 }
307 }
308
309 P = pll_lim->max_p ? maxP : (1 << maxP);
310 if ((clk * P) < minvco) {
311 minvco = clk * maxP;
312 maxvco = minvco * 2;
313 }
314
315 if (clk + clk/200 > maxvco) /* +0.5% */
316 maxvco = clk + clk/200;
317
318 /* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
319 for (thisP = minP; thisP <= maxP; thisP++) {
320 P = pll_lim->max_p ? thisP : (1 << thisP);
321 clkP = clk * P;
322
323 if (clkP < minvco)
324 continue;
325 if (clkP > maxvco)
326 return bestclk;
327
328 for (M = minM; M <= maxM; M++) {
329 if (crystal/M < minU)
330 return bestclk;
331 if (crystal/M > maxU)
332 continue;
333
334 /* add crystal/2 to round better */
335 N = (clkP * M + crystal/2) / crystal;
336
337 if (N < minN)
338 continue;
339 if (N > maxN)
340 break;
341
342 /* more rounding additions */
343 calcclk = ((N * crystal + P/2) / P + M/2) / M;
344 delta = abs(calcclk - clk);
345 /* we do an exhaustive search rather than terminating
346 * on an optimality condition...
347 */
348 if (delta < bestdelta) {
349 bestdelta = delta;
350 bestclk = calcclk;
351 bestpv->N1 = N;
352 bestpv->M1 = M;
353 bestpv->log2P = thisP;
354 if (delta == 0) /* except this one */
355 return bestclk;
356 }
357 }
358 }
359
360 return bestclk;
361}
362
363static int
364getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
365 struct nouveau_pll_vals *bestpv)
366{
367 /* Find M, N and P for a two stage PLL
368 *
369 * Note that some bioses (NV30+) have lookup tables of precomputed MNP
370 * values, but we're too lazy to use those atm
371 *
372 * "clk" parameter in kHz
373 * returns calculated clock
374 */
375 struct drm_nouveau_private *dev_priv = dev->dev_private;
376 int chip_version = dev_priv->vbios.chip_version;
377 int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
378 int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
379 int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
380 int maxU1 = pll_lim->vco1.max_inputfreq, maxU2 = pll_lim->vco2.max_inputfreq;
381 int minM1 = pll_lim->vco1.min_m, maxM1 = pll_lim->vco1.max_m;
382 int minN1 = pll_lim->vco1.min_n, maxN1 = pll_lim->vco1.max_n;
383 int minM2 = pll_lim->vco2.min_m, maxM2 = pll_lim->vco2.max_m;
384 int minN2 = pll_lim->vco2.min_n, maxN2 = pll_lim->vco2.max_n;
385 int maxlog2P = pll_lim->max_usable_log2p;
386 int crystal = pll_lim->refclk;
387 bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
388 int M1, N1, M2, N2, log2P;
389 int clkP, calcclk1, calcclk2, calcclkout;
390 int delta, bestdelta = INT_MAX;
391 int bestclk = 0;
392
393 int vco2 = (maxvco2 - maxvco2/200) / 2;
394 for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
395 ;
396 clkP = clk << log2P;
397
398 if (maxvco2 < clk + clk/200) /* +0.5% */
399 maxvco2 = clk + clk/200;
400
401 for (M1 = minM1; M1 <= maxM1; M1++) {
402 if (crystal/M1 < minU1)
403 return bestclk;
404 if (crystal/M1 > maxU1)
405 continue;
406
407 for (N1 = minN1; N1 <= maxN1; N1++) {
408 calcclk1 = crystal * N1 / M1;
409 if (calcclk1 < minvco1)
410 continue;
411 if (calcclk1 > maxvco1)
412 break;
413
414 for (M2 = minM2; M2 <= maxM2; M2++) {
415 if (calcclk1/M2 < minU2)
416 break;
417 if (calcclk1/M2 > maxU2)
418 continue;
419
420 /* add calcclk1/2 to round better */
421 N2 = (clkP * M2 + calcclk1/2) / calcclk1;
422 if (N2 < minN2)
423 continue;
424 if (N2 > maxN2)
425 break;
426
427 if (!fixedgain2) {
428 if (chip_version < 0x60)
429 if (N2/M2 < 4 || N2/M2 > 10)
430 continue;
431
432 calcclk2 = calcclk1 * N2 / M2;
433 if (calcclk2 < minvco2)
434 break;
435 if (calcclk2 > maxvco2)
436 continue;
437 } else
438 calcclk2 = calcclk1;
439
440 calcclkout = calcclk2 >> log2P;
441 delta = abs(calcclkout - clk);
442 /* we do an exhaustive search rather than terminating
443 * on an optimality condition...
444 */
445 if (delta < bestdelta) {
446 bestdelta = delta;
447 bestclk = calcclkout;
448 bestpv->N1 = N1;
449 bestpv->M1 = M1;
450 bestpv->N2 = N2;
451 bestpv->M2 = M2;
452 bestpv->log2P = log2P;
453 if (delta == 0) /* except this one */
454 return bestclk;
455 }
456 }
457 }
458 }
459
460 return bestclk;
461}
462
463int
464nouveau_calc_pll_mnp(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
465 struct nouveau_pll_vals *pv)
466{
467 int outclk;
468
469 if (!pll_lim->vco2.maxfreq)
470 outclk = getMNP_single(dev, pll_lim, clk, pv);
471 else
472 outclk = getMNP_double(dev, pll_lim, clk, pv);
473
474 if (!outclk)
475 NV_ERROR(dev, "Could not find a compatible set of PLL values\n");
476
477 return outclk;
478}
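
The search above is exhaustive over (M1, N1, M2, N2) with an early-out only on an exact match, so the clock it reports is always the best realisable approximation. The formula being approximated is easy to check in isolation; a minimal stand-alone sketch, assuming the same integer arithmetic as getMNP_double() (the helper name and the example dividers are illustrative, not taken from any VBIOS):

#include <stdio.h>

/* clkout = ((refclk * N1 / M1) * N2 / M2) >> log2P, evaluated with the
 * same integer truncation the driver uses; all frequencies in kHz.
 */
static int pll_output_khz(int refclk, int N1, int M1, int N2, int M2, int log2P)
{
	int vco1 = refclk * N1 / M1;	/* first-stage VCO */
	int vco2 = vco1 * N2 / M2;	/* second-stage VCO */
	return vco2 >> log2P;		/* power-of-two post divider */
}

int main(void)
{
	/* e.g. a 27000 kHz crystal: 27000 * 100/6 * 8/2 >> 1 = 900000 kHz */
	printf("%d kHz\n", pll_output_khz(27000, 100, 6, 8, 2, 1));
	return 0;
}
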
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
new file mode 100644
index 000000000000..c1d7301c0e9c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -0,0 +1,400 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/object.h>
26#include <core/client.h>
27#include <core/device.h>
28#include <core/class.h>
29
30#include <subdev/fb.h>
31#include <subdev/vm.h>
32#include <subdev/instmem.h>
33
34#include <engine/software.h>
35
36#include "nouveau_drm.h"
37#include "nouveau_dma.h"
38#include "nouveau_bo.h"
39#include "nouveau_chan.h"
40#include "nouveau_fence.h"
41#include "nouveau_abi16.h"
42
43MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
44static int nouveau_vram_pushbuf;
45module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
46
47int
48nouveau_channel_idle(struct nouveau_channel *chan)
49{
50 struct nouveau_cli *cli = chan->cli;
51 struct nouveau_fence *fence = NULL;
52 int ret;
53
54 ret = nouveau_fence_new(chan, &fence);
55 if (!ret) {
56 ret = nouveau_fence_wait(fence, false, false);
57 nouveau_fence_unref(&fence);
58 }
59
60 if (ret)
61 NV_ERROR(cli, "failed to idle channel 0x%08x\n", chan->handle);
62 return ret;
63}
64
65void
66nouveau_channel_del(struct nouveau_channel **pchan)
67{
68 struct nouveau_channel *chan = *pchan;
69 if (chan) {
70 struct nouveau_object *client = nv_object(chan->cli);
71 if (chan->fence) {
72 nouveau_channel_idle(chan);
73 nouveau_fence(chan->drm)->context_del(chan);
74 }
75 nouveau_object_del(client, NVDRM_DEVICE, chan->handle);
76 nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
77 nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
78 nouveau_bo_unmap(chan->push.buffer);
79 nouveau_bo_ref(NULL, &chan->push.buffer);
80 kfree(chan);
81 }
82 *pchan = NULL;
83}
84
85static int
86nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
87 u32 parent, u32 handle, u32 size,
88 struct nouveau_channel **pchan)
89{
90 struct nouveau_device *device = nv_device(drm->device);
91 struct nouveau_instmem *imem = nouveau_instmem(device);
92 struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
93 struct nouveau_fb *pfb = nouveau_fb(device);
94 struct nouveau_client *client = &cli->base;
95 struct nv_dma_class args = {};
96 struct nouveau_channel *chan;
97 struct nouveau_object *push;
98 u32 target;
99 int ret;
100
101 chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
102 if (!chan)
103 return -ENOMEM;
104
105 chan->cli = cli;
106 chan->drm = drm;
107 chan->handle = handle;
108
109 /* allocate memory for dma push buffer */
110 target = TTM_PL_FLAG_TT;
111 if (nouveau_vram_pushbuf)
112 target = TTM_PL_FLAG_VRAM;
113
114 ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
115 &chan->push.buffer);
116 if (ret == 0) {
117 ret = nouveau_bo_pin(chan->push.buffer, target);
118 if (ret == 0)
119 ret = nouveau_bo_map(chan->push.buffer);
120 }
121
122 if (ret) {
123 nouveau_channel_del(pchan);
124 return ret;
125 }
126
127 /* create dma object covering the *entire* memory space that the
128	 * pushbuf lives in, because the GEM code requires that
129 * we be able to call out to other (indirect) push buffers
130 */
131 chan->push.vma.offset = chan->push.buffer->bo.offset;
132 chan->push.handle = NVDRM_PUSH | (handle & 0xffff);
133
134 if (device->card_type >= NV_50) {
135 ret = nouveau_bo_vma_add(chan->push.buffer, client->vm,
136 &chan->push.vma);
137 if (ret) {
138 nouveau_channel_del(pchan);
139 return ret;
140 }
141
142 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
143 args.start = 0;
144 args.limit = client->vm->vmm->limit - 1;
145 } else
146 if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
147 u64 limit = pfb->ram.size - imem->reserved - 1;
148 if (device->card_type == NV_04) {
149 /* nv04 vram pushbuf hack, retarget to its location in
150			 * the framebuffer bar rather than direct vram access...
151			 * no idea why this exists; it came from the -nv ddx.
152 */
153 args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
154 args.start = pci_resource_start(device->pdev, 1);
155 args.limit = args.start + limit;
156 } else {
157 args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
158 args.start = 0;
159 args.limit = limit;
160 }
161 } else {
162 if (chan->drm->agp.stat == ENABLED) {
163 args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
164 args.start = chan->drm->agp.base;
165 args.limit = chan->drm->agp.base +
166 chan->drm->agp.size - 1;
167 } else {
168 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
169 args.start = 0;
170 args.limit = vmm->limit - 1;
171 }
172 }
173
174 ret = nouveau_object_new(nv_object(chan->cli), parent,
175 chan->push.handle, 0x0002,
176 &args, sizeof(args), &push);
177 if (ret) {
178 nouveau_channel_del(pchan);
179 return ret;
180 }
181
182 return 0;
183}
184
185static int
186nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
187 u32 parent, u32 handle, u32 engine,
188 struct nouveau_channel **pchan)
189{
190 static const u16 oclasses[] = { NVE0_CHANNEL_IND_CLASS,
191 NVC0_CHANNEL_IND_CLASS,
192 NV84_CHANNEL_IND_CLASS,
193 NV50_CHANNEL_IND_CLASS,
194 0 };
195 const u16 *oclass = oclasses;
196 struct nve0_channel_ind_class args;
197 struct nouveau_channel *chan;
198 int ret;
199
200 /* allocate dma push buffer */
201 ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan);
202 *pchan = chan;
203 if (ret)
204 return ret;
205
206 /* create channel object */
207 args.pushbuf = chan->push.handle;
208 args.ioffset = 0x10000 + chan->push.vma.offset;
209 args.ilength = 0x02000;
210 args.engine = engine;
211
212 do {
213 ret = nouveau_object_new(nv_object(cli), parent, handle,
214 *oclass++, &args, sizeof(args),
215 &chan->object);
216 if (ret == 0)
217 return ret;
218 } while (*oclass);
219
220 nouveau_channel_del(pchan);
221 return ret;
222}
223
224static int
225nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli,
226 u32 parent, u32 handle, struct nouveau_channel **pchan)
227{
228 static const u16 oclasses[] = { NV40_CHANNEL_DMA_CLASS,
229 NV17_CHANNEL_DMA_CLASS,
230 NV10_CHANNEL_DMA_CLASS,
231 NV03_CHANNEL_DMA_CLASS,
232 0 };
233 const u16 *oclass = oclasses;
234 struct nv03_channel_dma_class args;
235 struct nouveau_channel *chan;
236 int ret;
237
238 /* allocate dma push buffer */
239 ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan);
240 *pchan = chan;
241 if (ret)
242 return ret;
243
244 /* create channel object */
245 args.pushbuf = chan->push.handle;
246 args.offset = chan->push.vma.offset;
247
248 do {
249 ret = nouveau_object_new(nv_object(cli), parent, handle,
250 *oclass++, &args, sizeof(args),
251 &chan->object);
252 if (ret == 0)
253 return ret;
254 } while (ret && *oclass);
255
256 nouveau_channel_del(pchan);
257 return ret;
258}
259
260static int
261nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
262{
263 struct nouveau_client *client = nv_client(chan->cli);
264 struct nouveau_device *device = nv_device(chan->drm->device);
265 struct nouveau_instmem *imem = nouveau_instmem(device);
266 struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
267 struct nouveau_fb *pfb = nouveau_fb(device);
268 struct nouveau_software_chan *swch;
269 struct nouveau_object *object;
270 struct nv_dma_class args;
271 int ret, i;
272
273 /* allocate dma objects to cover all allowed vram, and gart */
274 if (device->card_type < NV_C0) {
275 if (device->card_type >= NV_50) {
276 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
277 args.start = 0;
278 args.limit = client->vm->vmm->limit - 1;
279 } else {
280 args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
281 args.start = 0;
282 args.limit = pfb->ram.size - imem->reserved - 1;
283 }
284
285 ret = nouveau_object_new(nv_object(client), chan->handle, vram,
286 0x003d, &args, sizeof(args), &object);
287 if (ret)
288 return ret;
289
290 if (device->card_type >= NV_50) {
291 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
292 args.start = 0;
293 args.limit = client->vm->vmm->limit - 1;
294 } else
295 if (chan->drm->agp.stat == ENABLED) {
296 args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
297 args.start = chan->drm->agp.base;
298 args.limit = chan->drm->agp.base +
299 chan->drm->agp.size - 1;
300 } else {
301 args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
302 args.start = 0;
303 args.limit = vmm->limit - 1;
304 }
305
306 ret = nouveau_object_new(nv_object(client), chan->handle, gart,
307 0x003d, &args, sizeof(args), &object);
308 if (ret)
309 return ret;
310
311 chan->vram = vram;
312 chan->gart = gart;
313 }
314
315 /* initialise dma tracking parameters */
316 switch (nv_hclass(chan->object) & 0x00ff) {
317 case 0x006b:
318 case 0x006e:
319 chan->user_put = 0x40;
320 chan->user_get = 0x44;
321 chan->dma.max = (0x10000 / 4) - 2;
322 break;
323 default:
324 chan->user_put = 0x40;
325 chan->user_get = 0x44;
326 chan->user_get_hi = 0x60;
327 chan->dma.ib_base = 0x10000 / 4;
328 chan->dma.ib_max = (0x02000 / 8) - 1;
329 chan->dma.ib_put = 0;
330 chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
331 chan->dma.max = chan->dma.ib_base;
332 break;
333 }
334
335 chan->dma.put = 0;
336 chan->dma.cur = chan->dma.put;
337 chan->dma.free = chan->dma.max - chan->dma.cur;
338
339 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
340 if (ret)
341 return ret;
342
343 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
344 OUT_RING(chan, 0x00000000);
345
346 /* allocate software object class (used for fences on <= nv05, and
347 * to signal flip completion), bind it to a subchannel.
348 */
349 if (chan != chan->drm->cechan) {
350 ret = nouveau_object_new(nv_object(client), chan->handle,
351 NvSw, nouveau_abi16_swclass(chan->drm),
352 NULL, 0, &object);
353 if (ret)
354 return ret;
355
356 swch = (void *)object->parent;
357 swch->flip = nouveau_flip_complete;
358 swch->flip_data = chan;
359 }
360
361 if (device->card_type < NV_C0) {
362 ret = RING_SPACE(chan, 2);
363 if (ret)
364 return ret;
365
366 BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
367 OUT_RING (chan, NvSw);
368 FIRE_RING (chan);
369 }
370
371 /* initialise synchronisation */
372 return nouveau_fence(chan->drm)->context_new(chan);
373}
374
375int
376nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli,
377 u32 parent, u32 handle, u32 arg0, u32 arg1,
378 struct nouveau_channel **pchan)
379{
380 int ret;
381
382 ret = nouveau_channel_ind(drm, cli, parent, handle, arg0, pchan);
383 if (ret) {
384 NV_DEBUG(cli, "ib channel create, %d\n", ret);
385 ret = nouveau_channel_dma(drm, cli, parent, handle, pchan);
386 if (ret) {
387 NV_DEBUG(cli, "dma channel create, %d\n", ret);
388 return ret;
389 }
390 }
391
392 ret = nouveau_channel_init(*pchan, arg0, arg1);
393 if (ret) {
394 NV_ERROR(cli, "channel failed to initialise, %d\n", ret);
395 nouveau_channel_del(pchan);
396 return ret;
397 }
398
399 return 0;
400}
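
At this point the whole lifecycle of the new interface is visible: nouveau_channel_new() tries an indirect (ib) channel first and falls back to a DMA channel, nouveau_channel_idle() fences and waits, and nouveau_channel_del() tears everything down. A hedged sketch of a typical caller follows; drm, cli, NVDRM_CHAN, NvDmaFB and NvDmaTT are assumed from the caller's context rather than defined here, and the error handling is reduced to the minimum:

/* illustrative caller only */
struct nouveau_channel *chan = NULL;
int ret;

ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN,
			  NvDmaFB, NvDmaTT, &chan);	/* arg0/arg1 become the vram/gart handles */
if (ret == 0) {
	/* ... emit methods via RING_SPACE()/OUT_RING()/FIRE_RING() ... */
	nouveau_channel_idle(chan);	/* block until the ring drains */
	nouveau_channel_del(&chan);	/* idles once more if fences exist, then frees */
}
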
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
new file mode 100644
index 000000000000..40f97e2c47b6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -0,0 +1,47 @@
1#ifndef __NOUVEAU_CHAN_H__
2#define __NOUVEAU_CHAN_H__
3
4struct nouveau_cli;
5
6struct nouveau_channel {
7 struct nouveau_cli *cli;
8 struct nouveau_drm *drm;
9
10 u32 handle;
11 u32 vram;
12 u32 gart;
13
14 struct {
15 struct nouveau_bo *buffer;
16 struct nouveau_vma vma;
17 u32 handle;
18 } push;
19
20 /* TODO: this will be reworked in the near future */
21 bool accel_done;
22 void *fence;
23 struct {
24 int max;
25 int free;
26 int cur;
27 int put;
28 int ib_base;
29 int ib_max;
30 int ib_free;
31 int ib_put;
32 } dma;
33 u32 user_get_hi;
34 u32 user_get;
35 u32 user_put;
36
37 struct nouveau_object *object;
38};
39
40
41int nouveau_channel_new(struct nouveau_drm *, struct nouveau_cli *,
42 u32 parent, u32 handle, u32 arg0, u32 arg1,
43 struct nouveau_channel **);
44void nouveau_channel_del(struct nouveau_channel **);
45int nouveau_channel_idle(struct nouveau_channel *);
46
47#endif
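
The dma sub-struct above is plain ring-buffer bookkeeping: put/cur track the CPU write position, max bounds the ring, and the ib_* fields do the same for the indirect buffer on NV50+. One common formulation of the free-space computation, as a hedged sketch (the helper name is hypothetical, it assumes a ring of max + 1 words with one word kept empty to distinguish full from empty, and the kernel's version additionally re-reads GET from the chan->user_get register):

/* how many words can be written before PUT would overrun GET */
static int ring_free(int get, int put, int max)
{
	if (get > put)			/* free gap lies between PUT and GET */
		return get - put - 1;
	return max - put + get;		/* otherwise it wraps past the end */
}
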
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
deleted file mode 100644
index cd180c678c13..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ /dev/null
@@ -1,396 +0,0 @@
1/*
2 * Copyright 2005-2006 Stephane Marchesin
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drv.h"
27#include <drm/nouveau_drm.h>
28#include "nouveau_dma.h"
29#include "nouveau_fifo.h"
30#include "nouveau_ramht.h"
31#include "nouveau_fence.h"
32#include "nouveau_software.h"
33
34static int
35nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
36{
37 u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
38 struct drm_device *dev = chan->dev;
39 struct drm_nouveau_private *dev_priv = dev->dev_private;
40 int ret;
41
42 /* allocate buffer object */
43 ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL, &chan->pushbuf_bo);
44 if (ret)
45 goto out;
46
47 ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
48 if (ret)
49 goto out;
50
51 ret = nouveau_bo_map(chan->pushbuf_bo);
52 if (ret)
53 goto out;
54
55 /* create DMA object covering the entire memtype where the push
56 * buffer resides, userspace can submit its own push buffers from
57 * anywhere within the same memtype.
58 */
59 chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
60 if (dev_priv->card_type >= NV_50) {
61 ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
62 &chan->pushbuf_vma);
63 if (ret)
64 goto out;
65
66 if (dev_priv->card_type < NV_C0) {
67 ret = nouveau_gpuobj_dma_new(chan,
68 NV_CLASS_DMA_IN_MEMORY, 0,
69 (1ULL << 40),
70 NV_MEM_ACCESS_RO,
71 NV_MEM_TARGET_VM,
72 &chan->pushbuf);
73 }
74 chan->pushbuf_base = chan->pushbuf_vma.offset;
75 } else
76 if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
77 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
78 dev_priv->gart_info.aper_size,
79 NV_MEM_ACCESS_RO,
80 NV_MEM_TARGET_GART,
81 &chan->pushbuf);
82 } else
83 if (dev_priv->card_type != NV_04) {
84 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
85 dev_priv->fb_available_size,
86 NV_MEM_ACCESS_RO,
87 NV_MEM_TARGET_VRAM,
88 &chan->pushbuf);
89 } else {
 90		/* NV04 cmdbuf hack, from the original ddx... not sure of its
 91		 * exact reason for existing :) PCI access to cmdbuf in
92 * VRAM.
93 */
94 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
95 pci_resource_start(dev->pdev, 1),
96 dev_priv->fb_available_size,
97 NV_MEM_ACCESS_RO,
98 NV_MEM_TARGET_PCI,
99 &chan->pushbuf);
100 }
101
102out:
103 if (ret) {
104 NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
105 nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
106 nouveau_gpuobj_ref(NULL, &chan->pushbuf);
107 if (chan->pushbuf_bo) {
108 nouveau_bo_unmap(chan->pushbuf_bo);
109 nouveau_bo_ref(NULL, &chan->pushbuf_bo);
110 }
111 }
112
113 return 0;
114}
115
116/* allocates and initializes a fifo for user space consumption */
117int
118nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
119 struct drm_file *file_priv,
120 uint32_t vram_handle, uint32_t gart_handle)
121{
122 struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE);
123 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
124 struct drm_nouveau_private *dev_priv = dev->dev_private;
125 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
126 struct nouveau_channel *chan;
127 unsigned long flags;
128 int ret, i;
129
130 /* allocate and lock channel structure */
131 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
132 if (!chan)
133 return -ENOMEM;
134 chan->dev = dev;
135 chan->file_priv = file_priv;
136 chan->vram_handle = vram_handle;
137 chan->gart_handle = gart_handle;
138
139 kref_init(&chan->ref);
140 atomic_set(&chan->users, 1);
141 mutex_init(&chan->mutex);
142 mutex_lock(&chan->mutex);
143
144 /* allocate hw channel id */
145 spin_lock_irqsave(&dev_priv->channels.lock, flags);
146 for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
147 if (!dev_priv->channels.ptr[chan->id]) {
148 nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
149 break;
150 }
151 }
152 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
153
154 if (chan->id == pfifo->channels) {
155 mutex_unlock(&chan->mutex);
156 kfree(chan);
157 return -ENODEV;
158 }
159
160 NV_DEBUG(dev, "initialising channel %d\n", chan->id);
161
162 /* setup channel's memory and vm */
163 ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
164 if (ret) {
165 NV_ERROR(dev, "gpuobj %d\n", ret);
166 nouveau_channel_put(&chan);
167 return ret;
168 }
169
170 /* Allocate space for per-channel fixed notifier memory */
171 ret = nouveau_notifier_init_channel(chan);
172 if (ret) {
173 NV_ERROR(dev, "ntfy %d\n", ret);
174 nouveau_channel_put(&chan);
175 return ret;
176 }
177
178 /* Allocate DMA push buffer */
179 ret = nouveau_channel_pushbuf_init(chan);
180 if (ret) {
181 NV_ERROR(dev, "pushbuf %d\n", ret);
182 nouveau_channel_put(&chan);
183 return ret;
184 }
185
186 nouveau_dma_init(chan);
187 chan->user_put = 0x40;
188 chan->user_get = 0x44;
189 if (dev_priv->card_type >= NV_50)
190 chan->user_get_hi = 0x60;
191
192 /* create fifo context */
193 ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
194 if (ret) {
195 nouveau_channel_put(&chan);
196 return ret;
197 }
198
199 /* Insert NOPs for NOUVEAU_DMA_SKIPS */
200 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
201 if (ret) {
202 nouveau_channel_put(&chan);
203 return ret;
204 }
205
206 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
207 OUT_RING (chan, 0x00000000);
208
209 ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
210 if (ret) {
211 nouveau_channel_put(&chan);
212 return ret;
213 }
214
215 if (dev_priv->card_type < NV_C0) {
216 ret = RING_SPACE(chan, 2);
217 if (ret) {
218 nouveau_channel_put(&chan);
219 return ret;
220 }
221
222 BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
223 OUT_RING (chan, NvSw);
224 FIRE_RING (chan);
225 }
226
227 FIRE_RING(chan);
228
229 ret = fence->context_new(chan, NVOBJ_ENGINE_FENCE);
230 if (ret) {
231 nouveau_channel_put(&chan);
232 return ret;
233 }
234
235 nouveau_debugfs_channel_init(chan);
236
237 NV_DEBUG(dev, "channel %d initialised\n", chan->id);
238 if (fpriv) {
239 spin_lock(&fpriv->lock);
240 list_add(&chan->list, &fpriv->channels);
241 spin_unlock(&fpriv->lock);
242 }
243 *chan_ret = chan;
244 return 0;
245}
246
247struct nouveau_channel *
248nouveau_channel_get_unlocked(struct nouveau_channel *ref)
249{
250 struct nouveau_channel *chan = NULL;
251
252 if (likely(ref && atomic_inc_not_zero(&ref->users)))
253 nouveau_channel_ref(ref, &chan);
254
255 return chan;
256}
257
258struct nouveau_channel *
259nouveau_channel_get(struct drm_file *file_priv, int id)
260{
261 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
262 struct nouveau_channel *chan;
263
264 spin_lock(&fpriv->lock);
265 list_for_each_entry(chan, &fpriv->channels, list) {
266 if (chan->id == id) {
267 chan = nouveau_channel_get_unlocked(chan);
268 spin_unlock(&fpriv->lock);
269 mutex_lock(&chan->mutex);
270 return chan;
271 }
272 }
273 spin_unlock(&fpriv->lock);
274
275 return ERR_PTR(-EINVAL);
276}
277
278void
279nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
280{
281 struct nouveau_channel *chan = *pchan;
282 struct drm_device *dev = chan->dev;
283 struct drm_nouveau_private *dev_priv = dev->dev_private;
284 unsigned long flags;
285 int i;
286
287	/* decrement the refcount, and we're done if there are still refs */
288 if (likely(!atomic_dec_and_test(&chan->users))) {
289 nouveau_channel_ref(NULL, pchan);
290 return;
291 }
292
293 /* no one wants the channel anymore */
294 NV_DEBUG(dev, "freeing channel %d\n", chan->id);
295 nouveau_debugfs_channel_fini(chan);
296
297 /* give it chance to idle */
298 nouveau_channel_idle(chan);
299
300 /* destroy the engine specific contexts */
301 for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) {
302 if (chan->engctx[i])
303 dev_priv->eng[i]->context_del(chan, i);
304 }
305
306	/* aside from its resources, the channel should now be dead;
307 * remove it from the channel list
308 */
309 spin_lock_irqsave(&dev_priv->channels.lock, flags);
310 nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
311 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
312
313 /* destroy any resources the channel owned */
314 nouveau_gpuobj_ref(NULL, &chan->pushbuf);
315 if (chan->pushbuf_bo) {
316 nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
317 nouveau_bo_unmap(chan->pushbuf_bo);
318 nouveau_bo_unpin(chan->pushbuf_bo);
319 nouveau_bo_ref(NULL, &chan->pushbuf_bo);
320 }
321 nouveau_ramht_ref(NULL, &chan->ramht, chan);
322 nouveau_notifier_takedown_channel(chan);
323 nouveau_gpuobj_channel_takedown(chan);
324
325 nouveau_channel_ref(NULL, pchan);
326}
327
328void
329nouveau_channel_put(struct nouveau_channel **pchan)
330{
331 mutex_unlock(&(*pchan)->mutex);
332 nouveau_channel_put_unlocked(pchan);
333}
334
335static void
336nouveau_channel_del(struct kref *ref)
337{
338 struct nouveau_channel *chan =
339 container_of(ref, struct nouveau_channel, ref);
340
341 kfree(chan);
342}
343
344void
345nouveau_channel_ref(struct nouveau_channel *chan,
346 struct nouveau_channel **pchan)
347{
348 if (chan)
349 kref_get(&chan->ref);
350
351 if (*pchan)
352 kref_put(&(*pchan)->ref, nouveau_channel_del);
353
354 *pchan = chan;
355}
356
357int
358nouveau_channel_idle(struct nouveau_channel *chan)
359{
360 struct drm_device *dev = chan->dev;
361 struct nouveau_fence *fence = NULL;
362 int ret;
363
364 ret = nouveau_fence_new(chan, &fence);
365 if (!ret) {
366 ret = nouveau_fence_wait(fence, false, false);
367 nouveau_fence_unref(&fence);
368 }
369
370 if (ret)
371 NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
372 return ret;
373}
374
375/* cleans up all the fifos from file_priv */
376void
377nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
378{
379 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
380 struct nouveau_channel *chan;
381 int i;
382
383 if (!pfifo)
384 return;
385
386 NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
387 for (i = 0; i < pfifo->channels; i++) {
388 chan = nouveau_channel_get(file_priv, i);
389 if (IS_ERR(chan))
390 continue;
391
392 list_del(&chan->list);
393 atomic_dec(&chan->users);
394 nouveau_channel_put(&chan);
395 }
396}
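
The file deleted above managed channel lifetime with two counters: chan->users gated whether the channel could still be looked up (atomic_inc_not_zero in nouveau_channel_get_unlocked), while chan->ref was a plain kref keeping the structure itself alive after the hardware state was gone. A userspace analogue of that pattern, as a hedged sketch (an illustration only, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct chan {
	atomic_int users;	/* may the channel still be acquired? */
	atomic_int refs;	/* how many pointers keep the memory alive? */
};

static struct chan *chan_get(struct chan *c)
{
	int u = atomic_load(&c->users);
	while (u != 0)		/* inc-not-zero: refuse a dying channel */
		if (atomic_compare_exchange_weak(&c->users, &u, u + 1)) {
			atomic_fetch_add(&c->refs, 1);
			return c;
		}
	return NULL;
}

static void chan_put(struct chan *c)
{
	if (atomic_fetch_sub(&c->users, 1) == 1)
		printf("last user: tear down hw state\n");	/* context_del etc. */
	if (atomic_fetch_sub(&c->refs, 1) == 1)
		free(c);	/* kref_put() equivalent */
}

int main(void)
{
	struct chan *c = calloc(1, sizeof(*c));
	atomic_init(&c->users, 1);	/* creator holds one user... */
	atomic_init(&c->refs, 1);	/* ...and one structural ref */
	chan_put(chan_get(c));		/* a borrower comes and goes */
	chan_put(c);			/* creator drops its references */
	return 0;
}
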
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index abb92de98573..9a6e2cb282dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -31,12 +31,29 @@
31#include <drm/drm_crtc_helper.h> 31#include <drm/drm_crtc_helper.h>
32 32
33#include "nouveau_reg.h" 33#include "nouveau_reg.h"
34#include "nouveau_drv.h" 34#include "nouveau_drm.h"
35#include "nouveau_hw.h"
36#include "nouveau_acpi.h"
37
38#include "nouveau_display.h"
39#include "nouveau_connector.h"
35#include "nouveau_encoder.h" 40#include "nouveau_encoder.h"
36#include "nouveau_crtc.h" 41#include "nouveau_crtc.h"
37#include "nouveau_connector.h" 42
38#include "nouveau_gpio.h" 43#include <subdev/i2c.h>
39#include "nouveau_hw.h" 44#include <subdev/gpio.h>
45
46MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
47static int nouveau_tv_disable = 0;
48module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
49
50MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
51static int nouveau_ignorelid = 0;
52module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
53
54MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (default: enabled)");
55static int nouveau_duallink = 1;
56module_param_named(duallink, nouveau_duallink, int, 0400);
40 57
41static void nouveau_connector_hotplug(void *, int); 58static void nouveau_connector_hotplug(void *, int);
42 59
@@ -58,7 +75,7 @@ find_encoder(struct drm_connector *connector, int type)
58 continue; 75 continue;
59 nv_encoder = nouveau_encoder(obj_to_encoder(obj)); 76 nv_encoder = nouveau_encoder(obj_to_encoder(obj));
60 77
61 if (type == OUTPUT_ANY || nv_encoder->dcb->type == type) 78 if (type == DCB_OUTPUT_ANY || nv_encoder->dcb->type == type)
62 return nv_encoder; 79 return nv_encoder;
63 } 80 }
64 81
@@ -83,19 +100,21 @@ static void
83nouveau_connector_destroy(struct drm_connector *connector) 100nouveau_connector_destroy(struct drm_connector *connector)
84{ 101{
85 struct nouveau_connector *nv_connector = nouveau_connector(connector); 102 struct nouveau_connector *nv_connector = nouveau_connector(connector);
86 struct drm_nouveau_private *dev_priv; 103 struct nouveau_gpio *gpio;
104 struct nouveau_drm *drm;
87 struct drm_device *dev; 105 struct drm_device *dev;
88 106
89 if (!nv_connector) 107 if (!nv_connector)
90 return; 108 return;
91 109
92 dev = nv_connector->base.dev; 110 dev = nv_connector->base.dev;
93 dev_priv = dev->dev_private; 111 drm = nouveau_drm(dev);
94 NV_DEBUG_KMS(dev, "\n"); 112 gpio = nouveau_gpio(drm->device);
113 NV_DEBUG(drm, "\n");
95 114
96 if (nv_connector->hpd != DCB_GPIO_UNUSED) { 115 if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
97 nouveau_gpio_isr_del(dev, 0, nv_connector->hpd, 0xff, 116 gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
98 nouveau_connector_hotplug, connector); 117 nouveau_connector_hotplug, connector);
99 } 118 }
100 119
101 kfree(nv_connector->edid); 120 kfree(nv_connector->edid);
@@ -104,15 +123,17 @@ nouveau_connector_destroy(struct drm_connector *connector)
104 kfree(connector); 123 kfree(connector);
105} 124}
106 125
107static struct nouveau_i2c_chan * 126static struct nouveau_i2c_port *
108nouveau_connector_ddc_detect(struct drm_connector *connector, 127nouveau_connector_ddc_detect(struct drm_connector *connector,
109 struct nouveau_encoder **pnv_encoder) 128 struct nouveau_encoder **pnv_encoder)
110{ 129{
111 struct drm_device *dev = connector->dev; 130 struct drm_device *dev = connector->dev;
131 struct nouveau_drm *drm = nouveau_drm(dev);
132 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
112 int i; 133 int i;
113 134
114 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 135 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
115 struct nouveau_i2c_chan *i2c = NULL; 136 struct nouveau_i2c_port *port = NULL;
116 struct nouveau_encoder *nv_encoder; 137 struct nouveau_encoder *nv_encoder;
117 struct drm_mode_object *obj; 138 struct drm_mode_object *obj;
118 int id; 139 int id;
@@ -127,11 +148,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
127 nv_encoder = nouveau_encoder(obj_to_encoder(obj)); 148 nv_encoder = nouveau_encoder(obj_to_encoder(obj));
128 149
129 if (nv_encoder->dcb->i2c_index < 0xf) 150 if (nv_encoder->dcb->i2c_index < 0xf)
130 i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); 151 port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
131 152 if (port && nv_probe_i2c(port, 0x50)) {
132 if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) {
133 *pnv_encoder = nv_encoder; 153 *pnv_encoder = nv_encoder;
134 return i2c; 154 return port;
135 } 155 }
136 } 156 }
137 157
@@ -148,8 +168,8 @@ nouveau_connector_of_detect(struct drm_connector *connector)
148 struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev); 168 struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
149 169
150 if (!dn || 170 if (!dn ||
151 !((nv_encoder = find_encoder(connector, OUTPUT_TMDS)) || 171 !((nv_encoder = find_encoder(connector, DCB_OUTPUT_TMDS)) ||
152 (nv_encoder = find_encoder(connector, OUTPUT_ANALOG)))) 172 (nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG))))
153 return NULL; 173 return NULL;
154 174
155 for_each_child_of_node(dn, cn) { 175 for_each_child_of_node(dn, cn) {
@@ -173,25 +193,25 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
173 struct nouveau_encoder *nv_encoder) 193 struct nouveau_encoder *nv_encoder)
174{ 194{
175 struct nouveau_connector *nv_connector = nouveau_connector(connector); 195 struct nouveau_connector *nv_connector = nouveau_connector(connector);
176 struct drm_nouveau_private *dev_priv = connector->dev->dev_private; 196 struct nouveau_drm *drm = nouveau_drm(connector->dev);
177 struct drm_device *dev = connector->dev; 197 struct drm_device *dev = connector->dev;
178 198
179 if (nv_connector->detected_encoder == nv_encoder) 199 if (nv_connector->detected_encoder == nv_encoder)
180 return; 200 return;
181 nv_connector->detected_encoder = nv_encoder; 201 nv_connector->detected_encoder = nv_encoder;
182 202
183 if (dev_priv->card_type >= NV_50) { 203 if (nv_device(drm->device)->card_type >= NV_50) {
184 connector->interlace_allowed = true; 204 connector->interlace_allowed = true;
185 connector->doublescan_allowed = true; 205 connector->doublescan_allowed = true;
186 } else 206 } else
187 if (nv_encoder->dcb->type == OUTPUT_LVDS || 207 if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
188 nv_encoder->dcb->type == OUTPUT_TMDS) { 208 nv_encoder->dcb->type == DCB_OUTPUT_TMDS) {
189 connector->doublescan_allowed = false; 209 connector->doublescan_allowed = false;
190 connector->interlace_allowed = false; 210 connector->interlace_allowed = false;
191 } else { 211 } else {
192 connector->doublescan_allowed = true; 212 connector->doublescan_allowed = true;
193 if (dev_priv->card_type == NV_20 || 213 if (nv_device(drm->device)->card_type == NV_20 ||
194 (dev_priv->card_type == NV_10 && 214 (nv_device(drm->device)->card_type == NV_10 &&
195 (dev->pci_device & 0x0ff0) != 0x0100 && 215 (dev->pci_device & 0x0ff0) != 0x0100 &&
196 (dev->pci_device & 0x0ff0) != 0x0150)) 216 (dev->pci_device & 0x0ff0) != 0x0150))
197 /* HW is broken */ 217 /* HW is broken */
@@ -203,7 +223,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
203 if (nv_connector->type == DCB_CONNECTOR_DVI_I) { 223 if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
204 drm_connector_property_set_value(connector, 224 drm_connector_property_set_value(connector,
205 dev->mode_config.dvi_i_subconnector_property, 225 dev->mode_config.dvi_i_subconnector_property,
206 nv_encoder->dcb->type == OUTPUT_TMDS ? 226 nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
207 DRM_MODE_SUBCONNECTOR_DVID : 227 DRM_MODE_SUBCONNECTOR_DVID :
208 DRM_MODE_SUBCONNECTOR_DVIA); 228 DRM_MODE_SUBCONNECTOR_DVIA);
209 } 229 }
@@ -213,10 +233,11 @@ static enum drm_connector_status
213nouveau_connector_detect(struct drm_connector *connector, bool force) 233nouveau_connector_detect(struct drm_connector *connector, bool force)
214{ 234{
215 struct drm_device *dev = connector->dev; 235 struct drm_device *dev = connector->dev;
236 struct nouveau_drm *drm = nouveau_drm(dev);
216 struct nouveau_connector *nv_connector = nouveau_connector(connector); 237 struct nouveau_connector *nv_connector = nouveau_connector(connector);
217 struct nouveau_encoder *nv_encoder = NULL; 238 struct nouveau_encoder *nv_encoder = NULL;
218 struct nouveau_encoder *nv_partner; 239 struct nouveau_encoder *nv_partner;
219 struct nouveau_i2c_chan *i2c; 240 struct nouveau_i2c_port *i2c;
220 int type; 241 int type;
221 242
222 /* Cleanup the previous EDID block. */ 243 /* Cleanup the previous EDID block. */
@@ -232,14 +253,14 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
232 drm_mode_connector_update_edid_property(connector, 253 drm_mode_connector_update_edid_property(connector,
233 nv_connector->edid); 254 nv_connector->edid);
234 if (!nv_connector->edid) { 255 if (!nv_connector->edid) {
235 NV_ERROR(dev, "DDC responded, but no EDID for %s\n", 256 NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
236 drm_get_connector_name(connector)); 257 drm_get_connector_name(connector));
237 goto detect_analog; 258 goto detect_analog;
238 } 259 }
239 260
240 if (nv_encoder->dcb->type == OUTPUT_DP && 261 if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
241 !nouveau_dp_detect(to_drm_encoder(nv_encoder))) { 262 !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
242 NV_ERROR(dev, "Detected %s, but failed init\n", 263 NV_ERROR(drm, "Detected %s, but failed init\n",
243 drm_get_connector_name(connector)); 264 drm_get_connector_name(connector));
244 return connector_status_disconnected; 265 return connector_status_disconnected;
245 } 266 }
@@ -250,19 +271,19 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
250 * isn't necessarily correct. 271 * isn't necessarily correct.
251 */ 272 */
252 nv_partner = NULL; 273 nv_partner = NULL;
253 if (nv_encoder->dcb->type == OUTPUT_TMDS) 274 if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS)
254 nv_partner = find_encoder(connector, OUTPUT_ANALOG); 275 nv_partner = find_encoder(connector, DCB_OUTPUT_ANALOG);
255 if (nv_encoder->dcb->type == OUTPUT_ANALOG) 276 if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
256 nv_partner = find_encoder(connector, OUTPUT_TMDS); 277 nv_partner = find_encoder(connector, DCB_OUTPUT_TMDS);
257 278
258 if (nv_partner && ((nv_encoder->dcb->type == OUTPUT_ANALOG && 279 if (nv_partner && ((nv_encoder->dcb->type == DCB_OUTPUT_ANALOG &&
259 nv_partner->dcb->type == OUTPUT_TMDS) || 280 nv_partner->dcb->type == DCB_OUTPUT_TMDS) ||
260 (nv_encoder->dcb->type == OUTPUT_TMDS && 281 (nv_encoder->dcb->type == DCB_OUTPUT_TMDS &&
261 nv_partner->dcb->type == OUTPUT_ANALOG))) { 282 nv_partner->dcb->type == DCB_OUTPUT_ANALOG))) {
262 if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL) 283 if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
263 type = OUTPUT_TMDS; 284 type = DCB_OUTPUT_TMDS;
264 else 285 else
265 type = OUTPUT_ANALOG; 286 type = DCB_OUTPUT_ANALOG;
266 287
267 nv_encoder = find_encoder(connector, type); 288 nv_encoder = find_encoder(connector, type);
268 } 289 }
@@ -278,9 +299,9 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
278 } 299 }
279 300
280detect_analog: 301detect_analog:
281 nv_encoder = find_encoder(connector, OUTPUT_ANALOG); 302 nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG);
282 if (!nv_encoder && !nouveau_tv_disable) 303 if (!nv_encoder && !nouveau_tv_disable)
283 nv_encoder = find_encoder(connector, OUTPUT_TV); 304 nv_encoder = find_encoder(connector, DCB_OUTPUT_TV);
284 if (nv_encoder && force) { 305 if (nv_encoder && force) {
285 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 306 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
286 struct drm_encoder_helper_funcs *helper = 307 struct drm_encoder_helper_funcs *helper =
@@ -301,7 +322,7 @@ static enum drm_connector_status
301nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) 322nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
302{ 323{
303 struct drm_device *dev = connector->dev; 324 struct drm_device *dev = connector->dev;
304 struct drm_nouveau_private *dev_priv = dev->dev_private; 325 struct nouveau_drm *drm = nouveau_drm(dev);
305 struct nouveau_connector *nv_connector = nouveau_connector(connector); 326 struct nouveau_connector *nv_connector = nouveau_connector(connector);
306 struct nouveau_encoder *nv_encoder = NULL; 327 struct nouveau_encoder *nv_encoder = NULL;
307 enum drm_connector_status status = connector_status_disconnected; 328 enum drm_connector_status status = connector_status_disconnected;
@@ -313,12 +334,12 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
313 nv_connector->edid = NULL; 334 nv_connector->edid = NULL;
314 } 335 }
315 336
316 nv_encoder = find_encoder(connector, OUTPUT_LVDS); 337 nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
317 if (!nv_encoder) 338 if (!nv_encoder)
318 return connector_status_disconnected; 339 return connector_status_disconnected;
319 340
320 /* Try retrieving EDID via DDC */ 341 /* Try retrieving EDID via DDC */
321 if (!dev_priv->vbios.fp_no_ddc) { 342 if (!drm->vbios.fp_no_ddc) {
322 status = nouveau_connector_detect(connector, force); 343 status = nouveau_connector_detect(connector, force);
323 if (status == connector_status_connected) 344 if (status == connector_status_connected)
324 goto out; 345 goto out;
@@ -334,7 +355,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
334 * valid - it's not (rh#613284) 355 * valid - it's not (rh#613284)
335 */ 356 */
336 if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) { 357 if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
337 if (!nouveau_acpi_edid(dev, connector)) { 358 if (!(nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
338 status = connector_status_connected; 359 status = connector_status_connected;
339 goto out; 360 goto out;
340 } 361 }
@@ -344,7 +365,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
344 * modeline is available for the panel, set it as the panel's 365
345 * native mode and exit. 366 * native mode and exit.
346 */ 367 */
347 if (nouveau_bios_fp_mode(dev, NULL) && (dev_priv->vbios.fp_no_ddc || 368 if (nouveau_bios_fp_mode(dev, NULL) && (drm->vbios.fp_no_ddc ||
348 nv_encoder->dcb->lvdsconf.use_straps_for_mode)) { 369 nv_encoder->dcb->lvdsconf.use_straps_for_mode)) {
349 status = connector_status_connected; 370 status = connector_status_connected;
350 goto out; 371 goto out;
@@ -353,7 +374,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
353 /* Still nothing, some VBIOS images have a hardcoded EDID block 374 /* Still nothing, some VBIOS images have a hardcoded EDID block
354 * stored for the panel stored in them. 375 * stored for the panel stored in them.
355 */ 376 */
356 if (!dev_priv->vbios.fp_no_ddc) { 377 if (!drm->vbios.fp_no_ddc) {
357 struct edid *edid = 378 struct edid *edid =
358 (struct edid *)nouveau_bios_embedded_edid(dev); 379 (struct edid *)nouveau_bios_embedded_edid(dev);
359 if (edid) { 380 if (edid) {
@@ -379,21 +400,22 @@ out:
379static void 400static void
380nouveau_connector_force(struct drm_connector *connector) 401nouveau_connector_force(struct drm_connector *connector)
381{ 402{
403 struct nouveau_drm *drm = nouveau_drm(connector->dev);
382 struct nouveau_connector *nv_connector = nouveau_connector(connector); 404 struct nouveau_connector *nv_connector = nouveau_connector(connector);
383 struct nouveau_encoder *nv_encoder; 405 struct nouveau_encoder *nv_encoder;
384 int type; 406 int type;
385 407
386 if (nv_connector->type == DCB_CONNECTOR_DVI_I) { 408 if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
387 if (connector->force == DRM_FORCE_ON_DIGITAL) 409 if (connector->force == DRM_FORCE_ON_DIGITAL)
388 type = OUTPUT_TMDS; 410 type = DCB_OUTPUT_TMDS;
389 else 411 else
390 type = OUTPUT_ANALOG; 412 type = DCB_OUTPUT_ANALOG;
391 } else 413 } else
392 type = OUTPUT_ANY; 414 type = DCB_OUTPUT_ANY;
393 415
394 nv_encoder = find_encoder(connector, type); 416 nv_encoder = find_encoder(connector, type);
395 if (!nv_encoder) { 417 if (!nv_encoder) {
396 NV_ERROR(connector->dev, "can't find encoder to force %s on!\n", 418 NV_ERROR(drm, "can't find encoder to force %s on!\n",
397 drm_get_connector_name(connector)); 419 drm_get_connector_name(connector));
398 connector->status = connector_status_disconnected; 420 connector->status = connector_status_disconnected;
399 return; 421 return;
@@ -406,8 +428,7 @@ static int
406nouveau_connector_set_property(struct drm_connector *connector, 428nouveau_connector_set_property(struct drm_connector *connector,
407 struct drm_property *property, uint64_t value) 429 struct drm_property *property, uint64_t value)
408{ 430{
409 struct drm_nouveau_private *dev_priv = connector->dev->dev_private; 431 struct nouveau_display *disp = nouveau_display(connector->dev);
410 struct nouveau_display_engine *disp = &dev_priv->engine.display;
411 struct nouveau_connector *nv_connector = nouveau_connector(connector); 432 struct nouveau_connector *nv_connector = nouveau_connector(connector);
412 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; 433 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
413 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 434 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
@@ -532,7 +553,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
532 } 553 }
533 } 554 }
534 555
535 if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV) 556 if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
536 return get_slave_funcs(encoder)->set_property( 557 return get_slave_funcs(encoder)->set_property(
537 encoder, connector, property, value); 558 encoder, connector, property, value);
538 559
@@ -543,6 +564,7 @@ static struct drm_display_mode *
543nouveau_connector_native_mode(struct drm_connector *connector) 564nouveau_connector_native_mode(struct drm_connector *connector)
544{ 565{
545 struct drm_connector_helper_funcs *helper = connector->helper_private; 566 struct drm_connector_helper_funcs *helper = connector->helper_private;
567 struct nouveau_drm *drm = nouveau_drm(connector->dev);
546 struct nouveau_connector *nv_connector = nouveau_connector(connector); 568 struct nouveau_connector *nv_connector = nouveau_connector(connector);
547 struct drm_device *dev = connector->dev; 569 struct drm_device *dev = connector->dev;
548 struct drm_display_mode *mode, *largest = NULL; 570 struct drm_display_mode *mode, *largest = NULL;
@@ -556,7 +578,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
556 578
557 /* Use preferred mode if there is one.. */ 579 /* Use preferred mode if there is one.. */
558 if (mode->type & DRM_MODE_TYPE_PREFERRED) { 580 if (mode->type & DRM_MODE_TYPE_PREFERRED) {
559 NV_DEBUG_KMS(dev, "native mode from preferred\n"); 581 NV_DEBUG(drm, "native mode from preferred\n");
560 return drm_mode_duplicate(dev, mode); 582 return drm_mode_duplicate(dev, mode);
561 } 583 }
562 584
@@ -579,7 +601,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
579 largest = mode; 601 largest = mode;
580 } 602 }
581 603
582 NV_DEBUG_KMS(dev, "native mode from largest: %dx%d@%d\n", 604 NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n",
583 high_w, high_h, high_v); 605 high_w, high_h, high_v);
584 return largest ? drm_mode_duplicate(dev, largest) : NULL; 606 return largest ? drm_mode_duplicate(dev, largest) : NULL;
585} 607}
@@ -643,10 +665,10 @@ nouveau_connector_scaler_modes_add(struct drm_connector *connector)
643static void 665static void
644nouveau_connector_detect_depth(struct drm_connector *connector) 666nouveau_connector_detect_depth(struct drm_connector *connector)
645{ 667{
646 struct drm_nouveau_private *dev_priv = connector->dev->dev_private; 668 struct nouveau_drm *drm = nouveau_drm(connector->dev);
647 struct nouveau_connector *nv_connector = nouveau_connector(connector); 669 struct nouveau_connector *nv_connector = nouveau_connector(connector);
648 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; 670 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
649 struct nvbios *bios = &dev_priv->vbios; 671 struct nvbios *bios = &drm->vbios;
650 struct drm_display_mode *mode = nv_connector->native_mode; 672 struct drm_display_mode *mode = nv_connector->native_mode;
651 bool duallink; 673 bool duallink;
652 674
@@ -661,7 +683,7 @@ nouveau_connector_detect_depth(struct drm_connector *connector)
661 } 683 }
662 684
663 /* we're out of options unless we're LVDS, default to 8bpc */ 685 /* we're out of options unless we're LVDS, default to 8bpc */
664 if (nv_encoder->dcb->type != OUTPUT_LVDS) { 686 if (nv_encoder->dcb->type != DCB_OUTPUT_LVDS) {
665 connector->display_info.bpc = 8; 687 connector->display_info.bpc = 8;
666 return; 688 return;
667 } 689 }
@@ -693,7 +715,7 @@ static int
693nouveau_connector_get_modes(struct drm_connector *connector) 715nouveau_connector_get_modes(struct drm_connector *connector)
694{ 716{
695 struct drm_device *dev = connector->dev; 717 struct drm_device *dev = connector->dev;
696 struct drm_nouveau_private *dev_priv = dev->dev_private; 718 struct nouveau_drm *drm = nouveau_drm(dev);
697 struct nouveau_connector *nv_connector = nouveau_connector(connector); 719 struct nouveau_connector *nv_connector = nouveau_connector(connector);
698 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; 720 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
699 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 721 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
@@ -709,9 +731,9 @@ nouveau_connector_get_modes(struct drm_connector *connector)
709 if (nv_connector->edid) 731 if (nv_connector->edid)
710 ret = drm_add_edid_modes(connector, nv_connector->edid); 732 ret = drm_add_edid_modes(connector, nv_connector->edid);
711 else 733 else
712 if (nv_encoder->dcb->type == OUTPUT_LVDS && 734 if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
713 (nv_encoder->dcb->lvdsconf.use_straps_for_mode || 735 (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
714 dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { 736 drm->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
715 struct drm_display_mode mode; 737 struct drm_display_mode mode;
716 738
717 nouveau_bios_fp_mode(dev, &mode); 739 nouveau_bios_fp_mode(dev, &mode);
@@ -746,7 +768,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
746 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) 768 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
747 nouveau_connector_detect_depth(connector); 769 nouveau_connector_detect_depth(connector);
748 770
749 if (nv_encoder->dcb->type == OUTPUT_TV) 771 if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
750 ret = get_slave_funcs(encoder)->get_modes(encoder, connector); 772 ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
751 773
752 if (nv_connector->type == DCB_CONNECTOR_LVDS || 774 if (nv_connector->type == DCB_CONNECTOR_LVDS ||
@@ -761,15 +783,15 @@ static unsigned
761get_tmds_link_bandwidth(struct drm_connector *connector) 783get_tmds_link_bandwidth(struct drm_connector *connector)
762{ 784{
763 struct nouveau_connector *nv_connector = nouveau_connector(connector); 785 struct nouveau_connector *nv_connector = nouveau_connector(connector);
764 struct drm_nouveau_private *dev_priv = connector->dev->dev_private; 786 struct nouveau_drm *drm = nouveau_drm(connector->dev);
765 struct dcb_entry *dcb = nv_connector->detected_encoder->dcb; 787 struct dcb_output *dcb = nv_connector->detected_encoder->dcb;
766 788
767 if (dcb->location != DCB_LOC_ON_CHIP || 789 if (dcb->location != DCB_LOC_ON_CHIP ||
768 dev_priv->chipset >= 0x46) 790 nv_device(drm->device)->chipset >= 0x46)
769 return 165000; 791 return 165000;
770 else if (dev_priv->chipset >= 0x40) 792 else if (nv_device(drm->device)->chipset >= 0x40)
771 return 155000; 793 return 155000;
772 else if (dev_priv->chipset >= 0x18) 794 else if (nv_device(drm->device)->chipset >= 0x18)
773 return 135000; 795 return 135000;
774 else 796 else
775 return 112000; 797 return 112000;
@@ -786,7 +808,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
786 unsigned clock = mode->clock; 808 unsigned clock = mode->clock;
787 809
788 switch (nv_encoder->dcb->type) { 810 switch (nv_encoder->dcb->type) {
789 case OUTPUT_LVDS: 811 case DCB_OUTPUT_LVDS:
790 if (nv_connector->native_mode && 812 if (nv_connector->native_mode &&
791 (mode->hdisplay > nv_connector->native_mode->hdisplay || 813 (mode->hdisplay > nv_connector->native_mode->hdisplay ||
792 mode->vdisplay > nv_connector->native_mode->vdisplay)) 814 mode->vdisplay > nv_connector->native_mode->vdisplay))
@@ -795,19 +817,19 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
795 min_clock = 0; 817 min_clock = 0;
796 max_clock = 400000; 818 max_clock = 400000;
797 break; 819 break;
798 case OUTPUT_TMDS: 820 case DCB_OUTPUT_TMDS:
799 max_clock = get_tmds_link_bandwidth(connector); 821 max_clock = get_tmds_link_bandwidth(connector);
800 if (nouveau_duallink && nv_encoder->dcb->duallink_possible) 822 if (nouveau_duallink && nv_encoder->dcb->duallink_possible)
801 max_clock *= 2; 823 max_clock *= 2;
802 break; 824 break;
803 case OUTPUT_ANALOG: 825 case DCB_OUTPUT_ANALOG:
804 max_clock = nv_encoder->dcb->crtconf.maxfreq; 826 max_clock = nv_encoder->dcb->crtconf.maxfreq;
805 if (!max_clock) 827 if (!max_clock)
806 max_clock = 350000; 828 max_clock = 350000;
807 break; 829 break;
808 case OUTPUT_TV: 830 case DCB_OUTPUT_TV:
809 return get_slave_funcs(encoder)->mode_valid(encoder, mode); 831 return get_slave_funcs(encoder)->mode_valid(encoder, mode);
810 case OUTPUT_DP: 832 case DCB_OUTPUT_DP:
811 max_clock = nv_encoder->dp.link_nr; 833 max_clock = nv_encoder->dp.link_nr;
812 max_clock *= nv_encoder->dp.link_bw; 834 max_clock *= nv_encoder->dp.link_bw;
813 clock = clock * (connector->display_info.bpc * 3) / 10; 835 clock = clock * (connector->display_info.bpc * 3) / 10;
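
The DCB_OUTPUT_DP branch above compares two numbers in the same units: max_clock is the link capacity (lane count times per-lane symbol rate, link_bw being e.g. 162000 or 270000 kHz), and the mode's pixel clock is scaled by its bits per pixel and the 8b/10b coding overhead before the comparison. A worked example with assumed values, not taken from any particular panel:

/* 1920x1200@60: pixel clock ~193250 kHz at 8 bpc, on 4 lanes of 2.7 Gbps */
int link_nr = 4, link_bw = 270000, bpc = 8;
int pclk = 193250;

int max_clock = link_nr * link_bw;	/* 1080000 */
int need = pclk * (bpc * 3) / 10;	/* 463800 */
/* need <= max_clock, so the mode is accepted */
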
@@ -899,14 +921,15 @@ struct drm_connector *
899nouveau_connector_create(struct drm_device *dev, int index) 921nouveau_connector_create(struct drm_device *dev, int index)
900{ 922{
901 const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; 923 const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
902 struct drm_nouveau_private *dev_priv = dev->dev_private; 924 struct nouveau_drm *drm = nouveau_drm(dev);
903 struct nouveau_display_engine *disp = &dev_priv->engine.display; 925 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
926 struct nouveau_display *disp = nouveau_display(dev);
904 struct nouveau_connector *nv_connector = NULL; 927 struct nouveau_connector *nv_connector = NULL;
905 struct drm_connector *connector; 928 struct drm_connector *connector;
906 int type, ret = 0; 929 int type, ret = 0;
907 bool dummy; 930 bool dummy;
908 931
909 NV_DEBUG_KMS(dev, "\n"); 932 NV_DEBUG(drm, "\n");
910 933
911 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 934 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
912 nv_connector = nouveau_connector(connector); 935 nv_connector = nouveau_connector(connector);
@@ -922,7 +945,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
922 nv_connector->index = index; 945 nv_connector->index = index;
923 946
924 /* attempt to parse vbios connector type and hotplug gpio */ 947 /* attempt to parse vbios connector type and hotplug gpio */
925 nv_connector->dcb = dcb_conn(dev, index); 948 nv_connector->dcb = olddcb_conn(dev, index);
926 if (nv_connector->dcb) { 949 if (nv_connector->dcb) {
927 static const u8 hpd[16] = { 950 static const u8 hpd[16] = {
928 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff, 951 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
@@ -930,7 +953,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
930 }; 953 };
931 954
932 u32 entry = ROM16(nv_connector->dcb[0]); 955 u32 entry = ROM16(nv_connector->dcb[0]);
933 if (dcb_conntab(dev)[3] >= 4) 956 if (olddcb_conntab(dev)[3] >= 4)
934 entry |= (u32)ROM16(nv_connector->dcb[2]) << 16; 957 entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
935 958
936 nv_connector->hpd = ffs((entry & 0x07033000) >> 12); 959 nv_connector->hpd = ffs((entry & 0x07033000) >> 12);
@@ -939,7 +962,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
939 nv_connector->type = nv_connector->dcb[0]; 962 nv_connector->type = nv_connector->dcb[0];
940 if (drm_conntype_from_dcb(nv_connector->type) == 963 if (drm_conntype_from_dcb(nv_connector->type) ==
941 DRM_MODE_CONNECTOR_Unknown) { 964 DRM_MODE_CONNECTOR_Unknown) {
942 NV_WARN(dev, "unknown connector type %02x\n", 965 NV_WARN(drm, "unknown connector type %02x\n",
943 nv_connector->type); 966 nv_connector->type);
944 nv_connector->type = DCB_CONNECTOR_NONE; 967 nv_connector->type = DCB_CONNECTOR_NONE;
945 } 968 }
@@ -964,8 +987,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
964 * figure out something suitable ourselves 987 * figure out something suitable ourselves
965 */ 988 */
966 if (nv_connector->type == DCB_CONNECTOR_NONE) { 989 if (nv_connector->type == DCB_CONNECTOR_NONE) {
967 struct drm_nouveau_private *dev_priv = dev->dev_private; 990 struct nouveau_drm *drm = nouveau_drm(dev);
968 struct dcb_table *dcbt = &dev_priv->vbios.dcb; 991 struct dcb_table *dcbt = &drm->vbios.dcb;
969 u32 encoders = 0; 992 u32 encoders = 0;
970 int i; 993 int i;
971 994
@@ -974,25 +997,25 @@ nouveau_connector_create(struct drm_device *dev, int index)
974 encoders |= (1 << dcbt->entry[i].type); 997 encoders |= (1 << dcbt->entry[i].type);
975 } 998 }
976 999
977 if (encoders & (1 << OUTPUT_DP)) { 1000 if (encoders & (1 << DCB_OUTPUT_DP)) {
978 if (encoders & (1 << OUTPUT_TMDS)) 1001 if (encoders & (1 << DCB_OUTPUT_TMDS))
979 nv_connector->type = DCB_CONNECTOR_DP; 1002 nv_connector->type = DCB_CONNECTOR_DP;
980 else 1003 else
981 nv_connector->type = DCB_CONNECTOR_eDP; 1004 nv_connector->type = DCB_CONNECTOR_eDP;
982 } else 1005 } else
983 if (encoders & (1 << OUTPUT_TMDS)) { 1006 if (encoders & (1 << DCB_OUTPUT_TMDS)) {
984 if (encoders & (1 << OUTPUT_ANALOG)) 1007 if (encoders & (1 << DCB_OUTPUT_ANALOG))
985 nv_connector->type = DCB_CONNECTOR_DVI_I; 1008 nv_connector->type = DCB_CONNECTOR_DVI_I;
986 else 1009 else
987 nv_connector->type = DCB_CONNECTOR_DVI_D; 1010 nv_connector->type = DCB_CONNECTOR_DVI_D;
988 } else 1011 } else
989 if (encoders & (1 << OUTPUT_ANALOG)) { 1012 if (encoders & (1 << DCB_OUTPUT_ANALOG)) {
990 nv_connector->type = DCB_CONNECTOR_VGA; 1013 nv_connector->type = DCB_CONNECTOR_VGA;
991 } else 1014 } else
992 if (encoders & (1 << OUTPUT_LVDS)) { 1015 if (encoders & (1 << DCB_OUTPUT_LVDS)) {
993 nv_connector->type = DCB_CONNECTOR_LVDS; 1016 nv_connector->type = DCB_CONNECTOR_LVDS;
994 } else 1017 } else
995 if (encoders & (1 << OUTPUT_TV)) { 1018 if (encoders & (1 << DCB_OUTPUT_TV)) {
996 nv_connector->type = DCB_CONNECTOR_TV_0; 1019 nv_connector->type = DCB_CONNECTOR_TV_0;
997 } 1020 }
998 } 1021 }
@@ -1001,7 +1024,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
1001 if (type == DRM_MODE_CONNECTOR_LVDS) { 1024 if (type == DRM_MODE_CONNECTOR_LVDS) {
1002 ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy); 1025 ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
1003 if (ret) { 1026 if (ret) {
1004 NV_ERROR(dev, "Error parsing LVDS table, disabling\n"); 1027 NV_ERROR(drm, "Error parsing LVDS table, disabling\n");
1005 kfree(nv_connector); 1028 kfree(nv_connector);
1006 return ERR_PTR(ret); 1029 return ERR_PTR(ret);
1007 } 1030 }
@@ -1051,7 +1074,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
1051 1074
1052 switch (nv_connector->type) { 1075 switch (nv_connector->type) {
1053 case DCB_CONNECTOR_VGA: 1076 case DCB_CONNECTOR_VGA:
1054 if (dev_priv->card_type >= NV_50) { 1077 if (nv_device(drm->device)->card_type >= NV_50) {
1055 drm_connector_attach_property(connector, 1078 drm_connector_attach_property(connector,
1056 dev->mode_config.scaling_mode_property, 1079 dev->mode_config.scaling_mode_property,
1057 nv_connector->scaling_mode); 1080 nv_connector->scaling_mode);
@@ -1084,10 +1107,9 @@ nouveau_connector_create(struct drm_device *dev, int index)
1084 } 1107 }
1085 1108
1086 connector->polled = DRM_CONNECTOR_POLL_CONNECT; 1109 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1087 if (nv_connector->hpd != DCB_GPIO_UNUSED) { 1110 if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
1088 ret = nouveau_gpio_isr_add(dev, 0, nv_connector->hpd, 0xff, 1111 ret = gpio->isr_add(gpio, 0, nv_connector->hpd, 0xff,
1089 nouveau_connector_hotplug, 1112 nouveau_connector_hotplug, connector);
1090 connector);
1091 if (ret == 0) 1113 if (ret == 0)
1092 connector->polled = DRM_CONNECTOR_POLL_HPD; 1114 connector->polled = DRM_CONNECTOR_POLL_HPD;
1093 } 1115 }
@@ -1101,8 +1123,9 @@ nouveau_connector_hotplug(void *data, int plugged)
1101{ 1123{
1102 struct drm_connector *connector = data; 1124 struct drm_connector *connector = data;
1103 struct drm_device *dev = connector->dev; 1125 struct drm_device *dev = connector->dev;
1126 struct nouveau_drm *drm = nouveau_drm(dev);
1104 1127
1105 NV_DEBUG(dev, "%splugged %s\n", plugged ? "" : "un", 1128 NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
1106 drm_get_connector_name(connector)); 1129 drm_get_connector_name(connector));
1107 1130
1108 if (plugged) 1131 if (plugged)
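
For reference, the connector-type fallback in the hunk above is a pure priority chain over the DCB encoder mask: DP wins over TMDS, TMDS over analog, then LVDS, then TV, with DP-but-no-TMDS read as eDP and TMDS-plus-analog as DVI-I. A minimal user-space sketch of that decision, using stand-in enum values rather than the kernel's DCB_OUTPUT_*/DCB_CONNECTOR_* definitions:

#include <stdio.h>

/* Stand-ins for the DCB encoder/connector codes; the values are ours. */
enum { OUT_ANALOG, OUT_TV, OUT_TMDS, OUT_LVDS, OUT_DP };
enum { CONN_NONE, CONN_VGA, CONN_TV, CONN_DVI_D, CONN_DVI_I,
       CONN_LVDS, CONN_DP, CONN_EDP };

/* Mirror of the if/else ladder above: the most capable encoder decides. */
static int guess_connector(unsigned int encoders)
{
	if (encoders & (1 << OUT_DP))
		return (encoders & (1 << OUT_TMDS)) ? CONN_DP : CONN_EDP;
	if (encoders & (1 << OUT_TMDS))
		return (encoders & (1 << OUT_ANALOG)) ? CONN_DVI_I : CONN_DVI_D;
	if (encoders & (1 << OUT_ANALOG))
		return CONN_VGA;
	if (encoders & (1 << OUT_LVDS))
		return CONN_LVDS;
	if (encoders & (1 << OUT_TV))
		return CONN_TV;
	return CONN_NONE;
}

int main(void)
{
	/* An entry feeding both TMDS and analog encoders: DVI-I. */
	printf("%d\n", guess_connector((1 << OUT_TMDS) | (1 << OUT_ANALOG)));
	return 0;
}
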
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index e1c1567c0c1a..ebdb87670a8f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -28,7 +28,8 @@
28#define __NOUVEAU_CONNECTOR_H__ 28#define __NOUVEAU_CONNECTOR_H__
29 29
30#include <drm/drm_edid.h> 30#include <drm/drm_edid.h>
31#include "nouveau_i2c.h" 31
32struct nouveau_i2c_port;
32 33
33enum nouveau_underscan_type { 34enum nouveau_underscan_type {
34 UNDERSCAN_OFF, 35 UNDERSCAN_OFF,
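
The header change above swaps a full include for a forward declaration, which works because the connector structure only ever stores a pointer to the i2c port. A compiler needs the complete type for members held by value, but a pointer's size is always known; a tiny illustration (names ours):

struct nouveau_i2c_port;                 /* opaque: definition lives elsewhere */

struct example_state {
	struct nouveau_i2c_port *i2c;    /* fine: pointer to incomplete type */
	/* struct nouveau_i2c_port port; would not compile here */
};

This keeps every file that includes nouveau_connector.h from inheriting a dependency on the i2c header.
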
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
deleted file mode 100644
index f68cb5e71893..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ /dev/null
@@ -1,196 +0,0 @@
1/*
2 * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial
14 * portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26/*
27 * Authors:
28 * Ben Skeggs <bskeggs@redhat.com>
29 */
30
31#include <linux/debugfs.h>
32
33#include <drm/drmP.h>
34#include "nouveau_drv.h"
35
36#include <ttm/ttm_page_alloc.h>
37
38static int
39nouveau_debugfs_channel_info(struct seq_file *m, void *data)
40{
41 struct drm_info_node *node = (struct drm_info_node *) m->private;
42 struct nouveau_channel *chan = node->info_ent->data;
43
44 seq_printf(m, "channel id : %d\n", chan->id);
45
46 seq_printf(m, "cpu fifo state:\n");
47 seq_printf(m, " base: 0x%10llx\n", chan->pushbuf_base);
48 seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
49 seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
50 seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
51 seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);
52 if (chan->dma.ib_max) {
53 seq_printf(m, " ib max: 0x%08x\n", chan->dma.ib_max);
54 seq_printf(m, " ib put: 0x%08x\n", chan->dma.ib_put);
55 seq_printf(m, " ib free: 0x%08x\n", chan->dma.ib_free);
56 }
57
58 seq_printf(m, "gpu fifo state:\n");
59 seq_printf(m, " get: 0x%08x\n",
60 nvchan_rd32(chan, chan->user_get));
61 seq_printf(m, " put: 0x%08x\n",
62 nvchan_rd32(chan, chan->user_put));
63 if (chan->dma.ib_max) {
64 seq_printf(m, " ib get: 0x%08x\n",
65 nvchan_rd32(chan, 0x88));
66 seq_printf(m, " ib put: 0x%08x\n",
67 nvchan_rd32(chan, 0x8c));
68 }
69
70 return 0;
71}
72
73int
74nouveau_debugfs_channel_init(struct nouveau_channel *chan)
75{
76 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
77 struct drm_minor *minor = chan->dev->primary;
78 int ret;
79
80 if (!dev_priv->debugfs.channel_root) {
81 dev_priv->debugfs.channel_root =
82 debugfs_create_dir("channel", minor->debugfs_root);
83 if (!dev_priv->debugfs.channel_root)
84 return -ENOENT;
85 }
86
87 snprintf(chan->debugfs.name, 32, "%d", chan->id);
88 chan->debugfs.info.name = chan->debugfs.name;
89 chan->debugfs.info.show = nouveau_debugfs_channel_info;
90 chan->debugfs.info.driver_features = 0;
91 chan->debugfs.info.data = chan;
92
93 ret = drm_debugfs_create_files(&chan->debugfs.info, 1,
94 dev_priv->debugfs.channel_root,
95 chan->dev->primary);
96 if (ret == 0)
97 chan->debugfs.active = true;
98 return ret;
99}
100
101void
102nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
103{
104 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
105
106 if (!chan->debugfs.active)
107 return;
108
109 drm_debugfs_remove_files(&chan->debugfs.info, 1, chan->dev->primary);
110 chan->debugfs.active = false;
111
112 if (chan == dev_priv->channel) {
113 debugfs_remove(dev_priv->debugfs.channel_root);
114 dev_priv->debugfs.channel_root = NULL;
115 }
116}
117
118static int
119nouveau_debugfs_chipset_info(struct seq_file *m, void *data)
120{
121 struct drm_info_node *node = (struct drm_info_node *) m->private;
122 struct drm_minor *minor = node->minor;
123 struct drm_device *dev = minor->dev;
124 struct drm_nouveau_private *dev_priv = dev->dev_private;
125 uint32_t ppci_0;
126
127 ppci_0 = nv_rd32(dev, dev_priv->chipset >= 0x40 ? 0x88000 : 0x1800);
128
129 seq_printf(m, "PMC_BOOT_0: 0x%08x\n", nv_rd32(dev, NV03_PMC_BOOT_0));
130 seq_printf(m, "PCI ID : 0x%04x:0x%04x\n",
131 ppci_0 & 0xffff, ppci_0 >> 16);
132 return 0;
133}
134
135static int
136nouveau_debugfs_memory_info(struct seq_file *m, void *data)
137{
138 struct drm_info_node *node = (struct drm_info_node *) m->private;
139 struct drm_minor *minor = node->minor;
140 struct drm_nouveau_private *dev_priv = minor->dev->dev_private;
141
142 seq_printf(m, "VRAM total: %dKiB\n", (int)(dev_priv->vram_size >> 10));
143 return 0;
144}
145
146static int
147nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
148{
149 struct drm_info_node *node = (struct drm_info_node *) m->private;
150 struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
151 int i;
152
153 for (i = 0; i < dev_priv->vbios.length; i++)
154 seq_printf(m, "%c", dev_priv->vbios.data[i]);
155 return 0;
156}
157
158static int
159nouveau_debugfs_evict_vram(struct seq_file *m, void *data)
160{
161 struct drm_info_node *node = (struct drm_info_node *) m->private;
162 struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
163 int ret;
164
165 ret = ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
166 if (ret)
167 seq_printf(m, "failed: %d", ret);
168 else
169 seq_printf(m, "succeeded\n");
170 return 0;
171}
172
173static struct drm_info_list nouveau_debugfs_list[] = {
174 { "evict_vram", nouveau_debugfs_evict_vram, 0, NULL },
175 { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
176 { "memory", nouveau_debugfs_memory_info, 0, NULL },
177 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
178 { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
179 { "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
180};
181#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
182
183int
184nouveau_debugfs_init(struct drm_minor *minor)
185{
186 drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
187 minor->debugfs_root, minor);
188 return 0;
189}
190
191void
192nouveau_debugfs_takedown(struct drm_minor *minor)
193{
194 drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
195 minor);
196}
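
The removed file was built on the generic drm_debugfs machinery, which this rework leaves in place. For context, the registration pattern it followed looks roughly like the sketch below; the my_* names are invented, while drm_info_list, drm_debugfs_create_files() and drm_debugfs_remove_files() are the real hooks of this kernel series (seq_file's private pointer carries the drm_info_node, exactly as the deleted code cast it):

#include <drm/drmP.h>

static int my_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;

	seq_printf(m, "minor index: %d\n", node->minor->index);
	return 0;
}

static struct drm_info_list my_info[] = {
	{ "example", my_show, 0, NULL },
};

static int my_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(my_info, ARRAY_SIZE(my_info),
					minor->debugfs_root, minor);
}

static void my_debugfs_fini(struct drm_minor *minor)
{
	drm_debugfs_remove_files(my_info, ARRAY_SIZE(my_info), minor);
}
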
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index e4eeeaf20fdf..8f98e5a8c488 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -26,18 +26,21 @@
26 26
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h> 28#include <drm/drm_crtc_helper.h>
29#include "nouveau_drv.h" 29
30#include "nouveau_fb.h"
31#include "nouveau_fbcon.h" 30#include "nouveau_fbcon.h"
32#include "nouveau_hw.h" 31#include "nouveau_hw.h"
33#include "nouveau_crtc.h" 32#include "nouveau_crtc.h"
34#include "nouveau_dma.h" 33#include "nouveau_dma.h"
34#include "nouveau_gem.h"
35#include "nouveau_connector.h" 35#include "nouveau_connector.h"
36#include "nouveau_software.h"
37#include "nouveau_gpio.h"
38#include "nouveau_fence.h"
39#include "nv50_display.h" 36#include "nv50_display.h"
40 37
38#include "nouveau_fence.h"
39
40#include <subdev/bios/gpio.h>
41#include <subdev/gpio.h>
42#include <engine/disp.h>
43
41static void 44static void
42nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) 45nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
43{ 46{
@@ -71,7 +74,7 @@ nouveau_framebuffer_init(struct drm_device *dev,
71 struct drm_mode_fb_cmd2 *mode_cmd, 74 struct drm_mode_fb_cmd2 *mode_cmd,
72 struct nouveau_bo *nvbo) 75 struct nouveau_bo *nvbo)
73{ 76{
74 struct drm_nouveau_private *dev_priv = dev->dev_private; 77 struct nouveau_drm *drm = nouveau_drm(dev);
75 struct drm_framebuffer *fb = &nv_fb->base; 78 struct drm_framebuffer *fb = &nv_fb->base;
76 int ret; 79 int ret;
77 80
@@ -83,7 +86,7 @@ nouveau_framebuffer_init(struct drm_device *dev,
83 drm_helper_mode_fill_fb_struct(fb, mode_cmd); 86 drm_helper_mode_fill_fb_struct(fb, mode_cmd);
84 nv_fb->nvbo = nvbo; 87 nv_fb->nvbo = nvbo;
85 88
86 if (dev_priv->card_type >= NV_50) { 89 if (nv_device(drm->device)->card_type >= NV_50) {
87 u32 tile_flags = nouveau_bo_tile_layout(nvbo); 90 u32 tile_flags = nouveau_bo_tile_layout(nvbo);
88 if (tile_flags == 0x7a00 || 91 if (tile_flags == 0x7a00 ||
89 tile_flags == 0xfe00) 92 tile_flags == 0xfe00)
@@ -102,21 +105,21 @@ nouveau_framebuffer_init(struct drm_device *dev,
102 case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break; 105 case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
103 case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break; 106 case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
104 default: 107 default:
105 NV_ERROR(dev, "unknown depth %d\n", fb->depth); 108 NV_ERROR(drm, "unknown depth %d\n", fb->depth);
106 return -EINVAL; 109 return -EINVAL;
107 } 110 }
108 111
109 if (dev_priv->chipset == 0x50) 112 if (nv_device(drm->device)->chipset == 0x50)
110 nv_fb->r_format |= (tile_flags << 8); 113 nv_fb->r_format |= (tile_flags << 8);
111 114
112 if (!tile_flags) { 115 if (!tile_flags) {
113 if (dev_priv->card_type < NV_D0) 116 if (nv_device(drm->device)->card_type < NV_D0)
114 nv_fb->r_pitch = 0x00100000 | fb->pitches[0]; 117 nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
115 else 118 else
116 nv_fb->r_pitch = 0x01000000 | fb->pitches[0]; 119 nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
117 } else { 120 } else {
118 u32 mode = nvbo->tile_mode; 121 u32 mode = nvbo->tile_mode;
119 if (dev_priv->card_type >= NV_C0) 122 if (nv_device(drm->device)->card_type >= NV_C0)
120 mode >>= 4; 123 mode >>= 4;
121 nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode; 124 nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
122 } 125 }
@@ -212,8 +215,9 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = {
212int 215int
213nouveau_display_init(struct drm_device *dev) 216nouveau_display_init(struct drm_device *dev)
214{ 217{
215 struct drm_nouveau_private *dev_priv = dev->dev_private; 218 struct nouveau_drm *drm = nouveau_drm(dev);
216 struct nouveau_display_engine *disp = &dev_priv->engine.display; 219 struct nouveau_display *disp = nouveau_display(dev);
220 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
217 struct drm_connector *connector; 221 struct drm_connector *connector;
218 int ret; 222 int ret;
219 223
@@ -225,8 +229,8 @@ nouveau_display_init(struct drm_device *dev)
225 * some vbios default this to off for some reason, causing the 229 * some vbios default this to off for some reason, causing the
226 * panel to not work after resume 230 * panel to not work after resume
227 */ 231 */
228 if (nouveau_gpio_func_get(dev, DCB_GPIO_PANEL_POWER) == 0) { 232 if (gpio && gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff) == 0) {
229 nouveau_gpio_func_set(dev, DCB_GPIO_PANEL_POWER, true); 233 gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
230 msleep(300); 234 msleep(300);
231 } 235 }
232 236
@@ -236,7 +240,8 @@ nouveau_display_init(struct drm_device *dev)
236 /* enable hotplug interrupts */ 240 /* enable hotplug interrupts */
237 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 241 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
238 struct nouveau_connector *conn = nouveau_connector(connector); 242 struct nouveau_connector *conn = nouveau_connector(connector);
239 nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, true); 243 if (gpio)
244 gpio->irq(gpio, 0, conn->hpd, 0xff, true);
240 } 245 }
241 246
242 return ret; 247 return ret;
@@ -245,35 +250,65 @@ nouveau_display_init(struct drm_device *dev)
245void 250void
246nouveau_display_fini(struct drm_device *dev) 251nouveau_display_fini(struct drm_device *dev)
247{ 252{
248 struct drm_nouveau_private *dev_priv = dev->dev_private; 253 struct nouveau_drm *drm = nouveau_drm(dev);
249 struct nouveau_display_engine *disp = &dev_priv->engine.display; 254 struct nouveau_display *disp = nouveau_display(dev);
255 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
250 struct drm_connector *connector; 256 struct drm_connector *connector;
251 257
252 /* disable hotplug interrupts */ 258 /* disable hotplug interrupts */
253 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 259 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
254 struct nouveau_connector *conn = nouveau_connector(connector); 260 struct nouveau_connector *conn = nouveau_connector(connector);
255 nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, false); 261 if (gpio)
262 gpio->irq(gpio, 0, conn->hpd, 0xff, false);
256 } 263 }
257 264
258 drm_kms_helper_poll_disable(dev); 265 drm_kms_helper_poll_disable(dev);
259 disp->fini(dev); 266 disp->fini(dev);
260} 267}
261 268
269static void
270nouveau_display_vblank_notify(void *data, int crtc)
271{
272 drm_handle_vblank(data, crtc);
273}
274
275static void
276nouveau_display_vblank_get(void *data, int crtc)
277{
278 drm_vblank_get(data, crtc);
279}
280
281static void
282nouveau_display_vblank_put(void *data, int crtc)
283{
284 drm_vblank_put(data, crtc);
285}
286
262int 287int
263nouveau_display_create(struct drm_device *dev) 288nouveau_display_create(struct drm_device *dev)
264{ 289{
265 struct drm_nouveau_private *dev_priv = dev->dev_private; 290 struct nouveau_drm *drm = nouveau_drm(dev);
266 struct nouveau_display_engine *disp = &dev_priv->engine.display; 291 struct nouveau_disp *pdisp = nouveau_disp(drm->device);
292 struct nouveau_display *disp;
267 int ret, gen; 293 int ret, gen;
268 294
295 disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
296 if (!disp)
297 return -ENOMEM;
298
299 pdisp->vblank.data = dev;
300 pdisp->vblank.notify = nouveau_display_vblank_notify;
301 pdisp->vblank.get = nouveau_display_vblank_get;
302 pdisp->vblank.put = nouveau_display_vblank_put;
303
269 drm_mode_config_init(dev); 304 drm_mode_config_init(dev);
270 drm_mode_create_scaling_mode_property(dev); 305 drm_mode_create_scaling_mode_property(dev);
271 drm_mode_create_dvi_i_properties(dev); 306 drm_mode_create_dvi_i_properties(dev);
272 307
273 if (dev_priv->card_type < NV_50) 308 if (nv_device(drm->device)->card_type < NV_50)
274 gen = 0; 309 gen = 0;
275 else 310 else
276 if (dev_priv->card_type < NV_D0) 311 if (nv_device(drm->device)->card_type < NV_D0)
277 gen = 1; 312 gen = 1;
278 else 313 else
279 gen = 2; 314 gen = 2;
@@ -307,11 +342,11 @@ nouveau_display_create(struct drm_device *dev)
307 342
308 dev->mode_config.min_width = 0; 343 dev->mode_config.min_width = 0;
309 dev->mode_config.min_height = 0; 344 dev->mode_config.min_height = 0;
310 if (dev_priv->card_type < NV_10) { 345 if (nv_device(drm->device)->card_type < NV_10) {
311 dev->mode_config.max_width = 2048; 346 dev->mode_config.max_width = 2048;
312 dev->mode_config.max_height = 2048; 347 dev->mode_config.max_height = 2048;
313 } else 348 } else
314 if (dev_priv->card_type < NV_50) { 349 if (nv_device(drm->device)->card_type < NV_50) {
315 dev->mode_config.max_width = 4096; 350 dev->mode_config.max_width = 4096;
316 dev->mode_config.max_height = 4096; 351 dev->mode_config.max_height = 4096;
317 } else { 352 } else {
@@ -325,7 +360,13 @@ nouveau_display_create(struct drm_device *dev)
325 drm_kms_helper_poll_init(dev); 360 drm_kms_helper_poll_init(dev);
326 drm_kms_helper_poll_disable(dev); 361 drm_kms_helper_poll_disable(dev);
327 362
328 ret = disp->create(dev); 363 if (nv_device(drm->device)->card_type < NV_50)
364 ret = nv04_display_create(dev);
365 else
366 if (nv_device(drm->device)->card_type < NV_D0)
367 ret = nv50_display_create(dev);
368 else
369 ret = nvd0_display_create(dev);
329 if (ret) 370 if (ret)
330 goto disp_create_err; 371 goto disp_create_err;
331 372
@@ -335,10 +376,11 @@ nouveau_display_create(struct drm_device *dev)
335 goto vblank_err; 376 goto vblank_err;
336 } 377 }
337 378
379 nouveau_backlight_init(dev);
338 return 0; 380 return 0;
339 381
340vblank_err: 382vblank_err:
341 disp->destroy(dev); 383 disp->dtor(dev);
342disp_create_err: 384disp_create_err:
343 drm_kms_helper_poll_fini(dev); 385 drm_kms_helper_poll_fini(dev);
344 drm_mode_config_cleanup(dev); 386 drm_mode_config_cleanup(dev);
@@ -348,24 +390,109 @@ disp_create_err:
348void 390void
349nouveau_display_destroy(struct drm_device *dev) 391nouveau_display_destroy(struct drm_device *dev)
350{ 392{
351 struct drm_nouveau_private *dev_priv = dev->dev_private; 393 struct nouveau_display *disp = nouveau_display(dev);
352 struct nouveau_display_engine *disp = &dev_priv->engine.display;
353 394
395 nouveau_backlight_exit(dev);
354 drm_vblank_cleanup(dev); 396 drm_vblank_cleanup(dev);
355 397
356 disp->destroy(dev); 398 disp->dtor(dev);
357 399
358 drm_kms_helper_poll_fini(dev); 400 drm_kms_helper_poll_fini(dev);
359 drm_mode_config_cleanup(dev); 401 drm_mode_config_cleanup(dev);
402 nouveau_drm(dev)->display = NULL;
403 kfree(disp);
404}
405
406int
407nouveau_display_suspend(struct drm_device *dev)
408{
409 struct nouveau_drm *drm = nouveau_drm(dev);
410 struct drm_crtc *crtc;
411
412 nouveau_display_fini(dev);
413
414 NV_INFO(drm, "unpinning framebuffer(s)...\n");
415 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
416 struct nouveau_framebuffer *nouveau_fb;
417
418 nouveau_fb = nouveau_framebuffer(crtc->fb);
419 if (!nouveau_fb || !nouveau_fb->nvbo)
420 continue;
421
422 nouveau_bo_unpin(nouveau_fb->nvbo);
423 }
424
425 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
426 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
427
428 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
429 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
430 }
431
432 return 0;
433}
434
435void
436nouveau_display_resume(struct drm_device *dev)
437{
438 struct nouveau_drm *drm = nouveau_drm(dev);
439 struct drm_crtc *crtc;
440 int ret;
441
442 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
443 struct nouveau_framebuffer *nouveau_fb;
444
445 nouveau_fb = nouveau_framebuffer(crtc->fb);
446 if (!nouveau_fb || !nouveau_fb->nvbo)
447 continue;
448
449 nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
450 }
451
452 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
453 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
454
455 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
456 if (!ret)
457 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
458 if (ret)
459 NV_ERROR(drm, "Could not pin/map cursor.\n");
460 }
461
462 nouveau_fbcon_set_suspend(dev, 0);
463 nouveau_fbcon_zfill_all(dev);
464
465 nouveau_display_init(dev);
466
467 /* Force CLUT to get re-loaded during modeset */
468 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
469 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
470
471 nv_crtc->lut.depth = 0;
472 }
473
474 drm_helper_resume_force_mode(dev);
475
476 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
477 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
478 u32 offset = nv_crtc->cursor.nvbo->bo.offset;
479
480 nv_crtc->cursor.set_offset(nv_crtc, offset);
481 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
482 nv_crtc->cursor_saved_y);
483 }
360} 484}
361 485
362int 486int
363nouveau_vblank_enable(struct drm_device *dev, int crtc) 487nouveau_vblank_enable(struct drm_device *dev, int crtc)
364{ 488{
365 struct drm_nouveau_private *dev_priv = dev->dev_private; 489 struct nouveau_device *device = nouveau_dev(dev);
366 490
367 if (dev_priv->card_type >= NV_50) 491 if (device->card_type >= NV_D0)
368 nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0, 492 nv_mask(device, 0x6100c0 + (crtc * 0x800), 1, 1);
493 else
494 if (device->card_type >= NV_50)
495 nv_mask(device, NV50_PDISPLAY_INTR_EN_1, 0,
369 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc)); 496 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
370 else 497 else
371 NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 498 NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
@@ -377,10 +504,13 @@ nouveau_vblank_enable(struct drm_device *dev, int crtc)
377void 504void
378nouveau_vblank_disable(struct drm_device *dev, int crtc) 505nouveau_vblank_disable(struct drm_device *dev, int crtc)
379{ 506{
380 struct drm_nouveau_private *dev_priv = dev->dev_private; 507 struct nouveau_device *device = nouveau_dev(dev);
381 508
382 if (dev_priv->card_type >= NV_50) 509 if (device->card_type >= NV_D0)
383 nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 510 nv_mask(device, 0x6100c0 + (crtc * 0x800), 1, 0);
511 else
512 if (device->card_type >= NV_50)
513 nv_mask(device, NV50_PDISPLAY_INTR_EN_1,
384 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0); 514 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
385 else 515 else
386 NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0); 516 NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
@@ -434,15 +564,15 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
434 struct nouveau_page_flip_state *s, 564 struct nouveau_page_flip_state *s,
435 struct nouveau_fence **pfence) 565 struct nouveau_fence **pfence)
436{ 566{
437 struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW]; 567 struct nouveau_fence_chan *fctx = chan->fence;
438 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 568 struct nouveau_drm *drm = chan->drm;
439 struct drm_device *dev = chan->dev; 569 struct drm_device *dev = drm->dev;
440 unsigned long flags; 570 unsigned long flags;
441 int ret; 571 int ret;
442 572
443 /* Queue it to the pending list */ 573 /* Queue it to the pending list */
444 spin_lock_irqsave(&dev->event_lock, flags); 574 spin_lock_irqsave(&dev->event_lock, flags);
445 list_add_tail(&s->head, &swch->flip); 575 list_add_tail(&s->head, &fctx->flip);
446 spin_unlock_irqrestore(&dev->event_lock, flags); 576 spin_unlock_irqrestore(&dev->event_lock, flags);
447 577
448 /* Synchronize with the old framebuffer */ 578 /* Synchronize with the old framebuffer */
@@ -455,7 +585,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
455 if (ret) 585 if (ret)
456 goto fail; 586 goto fail;
457 587
458 if (dev_priv->card_type < NV_C0) { 588 if (nv_device(drm->device)->card_type < NV_C0) {
459 BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1); 589 BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
460 OUT_RING (chan, 0x00000000); 590 OUT_RING (chan, 0x00000000);
461 OUT_RING (chan, 0x00000000); 591 OUT_RING (chan, 0x00000000);
@@ -483,7 +613,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
483 struct drm_pending_vblank_event *event) 613 struct drm_pending_vblank_event *event)
484{ 614{
485 struct drm_device *dev = crtc->dev; 615 struct drm_device *dev = crtc->dev;
486 struct drm_nouveau_private *dev_priv = dev->dev_private; 616 struct nouveau_drm *drm = nouveau_drm(dev);
487 struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo; 617 struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
488 struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo; 618 struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
489 struct nouveau_page_flip_state *s; 619 struct nouveau_page_flip_state *s;
@@ -491,7 +621,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
491 struct nouveau_fence *fence; 621 struct nouveau_fence *fence;
492 int ret; 622 int ret;
493 623
494 if (!dev_priv->channel) 624 if (!drm->channel)
495 return -ENODEV; 625 return -ENODEV;
496 626
497 s = kzalloc(sizeof(*s), GFP_KERNEL); 627 s = kzalloc(sizeof(*s), GFP_KERNEL);
@@ -512,25 +642,25 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
512 /* Choose the channel the flip will be handled in */ 642 /* Choose the channel the flip will be handled in */
513 fence = new_bo->bo.sync_obj; 643 fence = new_bo->bo.sync_obj;
514 if (fence) 644 if (fence)
515 chan = nouveau_channel_get_unlocked(fence->channel); 645 chan = fence->channel;
516 if (!chan) 646 if (!chan)
517 chan = nouveau_channel_get_unlocked(dev_priv->channel); 647 chan = drm->channel;
518 mutex_lock(&chan->mutex); 648 mutex_lock(&chan->cli->mutex);
519 649
520 /* Emit a page flip */ 650 /* Emit a page flip */
521 if (dev_priv->card_type >= NV_50) { 651 if (nv_device(drm->device)->card_type >= NV_50) {
522 if (dev_priv->card_type >= NV_D0) 652 if (nv_device(drm->device)->card_type >= NV_D0)
523 ret = nvd0_display_flip_next(crtc, fb, chan, 0); 653 ret = nvd0_display_flip_next(crtc, fb, chan, 0);
524 else 654 else
525 ret = nv50_display_flip_next(crtc, fb, chan); 655 ret = nv50_display_flip_next(crtc, fb, chan);
526 if (ret) { 656 if (ret) {
527 nouveau_channel_put(&chan); 657 mutex_unlock(&chan->cli->mutex);
528 goto fail_unreserve; 658 goto fail_unreserve;
529 } 659 }
530 } 660 }
531 661
532 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); 662 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
533 nouveau_channel_put(&chan); 663 mutex_unlock(&chan->cli->mutex);
534 if (ret) 664 if (ret)
535 goto fail_unreserve; 665 goto fail_unreserve;
536 666
@@ -552,20 +682,21 @@ int
552nouveau_finish_page_flip(struct nouveau_channel *chan, 682nouveau_finish_page_flip(struct nouveau_channel *chan,
553 struct nouveau_page_flip_state *ps) 683 struct nouveau_page_flip_state *ps)
554{ 684{
555 struct nouveau_software_chan *swch = chan->engctx[NVOBJ_ENGINE_SW]; 685 struct nouveau_fence_chan *fctx = chan->fence;
556 struct drm_device *dev = chan->dev; 686 struct nouveau_drm *drm = chan->drm;
687 struct drm_device *dev = drm->dev;
557 struct nouveau_page_flip_state *s; 688 struct nouveau_page_flip_state *s;
558 unsigned long flags; 689 unsigned long flags;
559 690
560 spin_lock_irqsave(&dev->event_lock, flags); 691 spin_lock_irqsave(&dev->event_lock, flags);
561 692
562 if (list_empty(&swch->flip)) { 693 if (list_empty(&fctx->flip)) {
563 NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id); 694 NV_ERROR(drm, "unexpected pageflip\n");
564 spin_unlock_irqrestore(&dev->event_lock, flags); 695 spin_unlock_irqrestore(&dev->event_lock, flags);
565 return -EINVAL; 696 return -EINVAL;
566 } 697 }
567 698
568 s = list_first_entry(&swch->flip, struct nouveau_page_flip_state, head); 699 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
569 if (s->event) { 700 if (s->event) {
570 struct drm_pending_vblank_event *e = s->event; 701 struct drm_pending_vblank_event *e = s->event;
571 struct timeval now; 702 struct timeval now;
@@ -588,6 +719,24 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
588} 719}
589 720
590int 721int
722nouveau_flip_complete(void *data)
723{
724 struct nouveau_channel *chan = data;
725 struct nouveau_drm *drm = chan->drm;
726 struct nouveau_page_flip_state state;
727
728 if (!nouveau_finish_page_flip(chan, &state)) {
729 if (nv_device(drm->device)->card_type < NV_50) {
730 nv_set_crtc_base(drm->dev, state.crtc, state.offset +
731 state.y * state.pitch +
732 state.x * state.bpp / 8);
733 }
734 }
735
736 return 0;
737}
738
739int
591nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, 740nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
592 struct drm_mode_create_dumb *args) 741 struct drm_mode_create_dumb *args)
593{ 742{
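
Worth noting from this file's diff: nouveau_flip_complete() reprograms the pre-NV50 scanout base straight from the queued flip state, and the address is plain arithmetic, the buffer offset plus the (x, y) origin scaled by pitch and bytes per pixel. A self-contained restatement (field names ours):

#include <stdio.h>
#include <stdint.h>

struct flip_state { uint64_t offset; int x, y, pitch, bpp; };

/* offset + y * pitch + x * bpp / 8, as in nouveau_flip_complete() */
static uint64_t scanout_base(const struct flip_state *s)
{
	return s->offset + (uint64_t)s->y * s->pitch + s->x * s->bpp / 8;
}

int main(void)
{
	struct flip_state s = { 0x100000, 8, 4, 4096, 32 };

	printf("0x%llx\n", (unsigned long long)scanout_base(&s));  /* 0x104020 */
	return 0;
}
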
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
new file mode 100644
index 000000000000..722548bb3bd3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -0,0 +1,94 @@
1#ifndef __NOUVEAU_DISPLAY_H__
2#define __NOUVEAU_DISPLAY_H__
3
4#include <subdev/vm.h>
5
6#include "nouveau_drm.h"
7
8struct nouveau_framebuffer {
9 struct drm_framebuffer base;
10 struct nouveau_bo *nvbo;
11 struct nouveau_vma vma;
12 u32 r_dma;
13 u32 r_format;
14 u32 r_pitch;
15};
16
17static inline struct nouveau_framebuffer *
18nouveau_framebuffer(struct drm_framebuffer *fb)
19{
20 return container_of(fb, struct nouveau_framebuffer, base);
21}
22
23int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
24 struct drm_mode_fb_cmd2 *, struct nouveau_bo *);
25
26struct nouveau_page_flip_state {
27 struct list_head head;
28 struct drm_pending_vblank_event *event;
29 int crtc, bpp, pitch, x, y;
30 u64 offset;
31};
32
33struct nouveau_display {
34 void *priv;
35 void (*dtor)(struct drm_device *);
36 int (*init)(struct drm_device *);
37 void (*fini)(struct drm_device *);
38
39 struct drm_property *dithering_mode;
40 struct drm_property *dithering_depth;
41 struct drm_property *underscan_property;
42 struct drm_property *underscan_hborder_property;
43 struct drm_property *underscan_vborder_property;
44 /* not really hue and saturation: */
45 struct drm_property *vibrant_hue_property;
46 struct drm_property *color_vibrance_property;
47};
48
49static inline struct nouveau_display *
50nouveau_display(struct drm_device *dev)
51{
52 return nouveau_drm(dev)->display;
53}
54
55int nouveau_display_create(struct drm_device *dev);
56void nouveau_display_destroy(struct drm_device *dev);
57int nouveau_display_init(struct drm_device *dev);
58void nouveau_display_fini(struct drm_device *dev);
59int nouveau_display_suspend(struct drm_device *dev);
60void nouveau_display_resume(struct drm_device *dev);
61
62int nouveau_vblank_enable(struct drm_device *dev, int crtc);
63void nouveau_vblank_disable(struct drm_device *dev, int crtc);
64
65int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
66 struct drm_pending_vblank_event *event);
67int nouveau_finish_page_flip(struct nouveau_channel *,
68 struct nouveau_page_flip_state *);
69
70int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
71 struct drm_mode_create_dumb *args);
72int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
73 u32 handle, u64 *offset);
74int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
75 u32 handle);
76
77void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
78
79#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
80extern int nouveau_backlight_init(struct drm_device *);
81extern void nouveau_backlight_exit(struct drm_device *);
82#else
83static inline int
84nouveau_backlight_init(struct drm_device *dev)
85{
86 return 0;
87}
88
89static inline void
90nouveau_backlight_exit(struct drm_device *dev) {
91}
92#endif
93
94#endif
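
The struct nouveau_display introduced here replaces the old dev_priv engine table with a small vtable: create-time code picks a backend by chipset, and the generic layer only ever calls init/fini/dtor through the pointers. Reduced to a toy model (all names ours):

#include <stdio.h>

struct display_ops {
	int  (*init)(void);
	void (*fini)(void);
	void (*dtor)(void);
};

static int  demo_init(void) { puts("init"); return 0; }
static void demo_fini(void) { puts("fini"); }
static void demo_dtor(void) { puts("dtor"); }

/* In the driver this slot is filled by the nv04/nv50/nvd0 backend,
 * selected by card_type at create time. */
static struct display_ops demo = { demo_init, demo_fini, demo_dtor };

int main(void)
{
	struct display_ops *disp = &demo;

	if (disp->init() == 0)
		disp->fini();
	disp->dtor();
	return 0;
}
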
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 47d041269f65..40f91e1e5842 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -24,40 +24,16 @@
24 * 24 *
25 */ 25 */
26 26
27#include <drm/drmP.h> 27#include <core/client.h>
28#include "nouveau_drv.h"
29#include "nouveau_dma.h"
30#include "nouveau_ramht.h"
31
32void
33nouveau_dma_init(struct nouveau_channel *chan)
34{
35 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
36 struct nouveau_bo *pushbuf = chan->pushbuf_bo;
37
38 if (dev_priv->card_type >= NV_50) {
39 const int ib_size = pushbuf->bo.mem.size / 2;
40
41 chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
42 chan->dma.ib_max = (ib_size / 8) - 1;
43 chan->dma.ib_put = 0;
44 chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
45 28
46 chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2; 29#include "nouveau_drm.h"
47 } else { 30#include "nouveau_dma.h"
48 chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
49 }
50
51 chan->dma.put = 0;
52 chan->dma.cur = chan->dma.put;
53 chan->dma.free = chan->dma.max - chan->dma.cur;
54}
55 31
56void 32void
57OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords) 33OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
58{ 34{
59 bool is_iomem; 35 bool is_iomem;
60 u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem); 36 u32 *mem = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem);
61 mem = &mem[chan->dma.cur]; 37 mem = &mem[chan->dma.cur];
62 if (is_iomem) 38 if (is_iomem)
63 memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4); 39 memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
@@ -78,9 +54,9 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
78{ 54{
79 uint64_t val; 55 uint64_t val;
80 56
81 val = nvchan_rd32(chan, chan->user_get); 57 val = nv_ro32(chan->object, chan->user_get);
82 if (chan->user_get_hi) 58 if (chan->user_get_hi)
83 val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32; 59 val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32;
84 60
85 /* reset counter as long as GET is still advancing, this is 61 /* reset counter as long as GET is still advancing, this is
86 * to avoid misdetecting a GPU lockup if the GPU happens to 62 * to avoid misdetecting a GPU lockup if the GPU happens to
@@ -92,32 +68,33 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
92 } 68 }
93 69
94 if ((++*timeout & 0xff) == 0) { 70 if ((++*timeout & 0xff) == 0) {
95 DRM_UDELAY(1); 71 udelay(1);
96 if (*timeout > 100000) 72 if (*timeout > 100000)
97 return -EBUSY; 73 return -EBUSY;
98 } 74 }
99 75
100 if (val < chan->pushbuf_base || 76 if (val < chan->push.vma.offset ||
101 val > chan->pushbuf_base + (chan->dma.max << 2)) 77 val > chan->push.vma.offset + (chan->dma.max << 2))
102 return -EINVAL; 78 return -EINVAL;
103 79
104 return (val - chan->pushbuf_base) >> 2; 80 return (val - chan->push.vma.offset) >> 2;
105} 81}
106 82
107void 83void
108nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, 84nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
109 int delta, int length) 85 int delta, int length)
110{ 86{
111 struct nouveau_bo *pb = chan->pushbuf_bo; 87 struct nouveau_bo *pb = chan->push.buffer;
112 struct nouveau_vma *vma; 88 struct nouveau_vma *vma;
113 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; 89 int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
114 u64 offset; 90 u64 offset;
115 91
116 vma = nouveau_bo_vma_find(bo, chan->vm); 92 vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm);
117 BUG_ON(!vma); 93 BUG_ON(!vma);
118 offset = vma->offset + delta; 94 offset = vma->offset + delta;
119 95
 120 BUG_ON(chan->dma.ib_free < 1); 96 BUG_ON(chan->dma.ib_free < 1);
97
121 nouveau_bo_wr32(pb, ip++, lower_32_bits(offset)); 98 nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
122 nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); 99 nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
123 100
@@ -127,7 +104,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
127 /* Flush writes. */ 104 /* Flush writes. */
128 nouveau_bo_rd32(pb, 0); 105 nouveau_bo_rd32(pb, 0);
129 106
130 nvchan_wr32(chan, 0x8c, chan->dma.ib_put); 107 nv_wo32(chan->object, 0x8c, chan->dma.ib_put);
131 chan->dma.ib_free--; 108 chan->dma.ib_free--;
132} 109}
133 110
@@ -137,7 +114,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
137 uint32_t cnt = 0, prev_get = 0; 114 uint32_t cnt = 0, prev_get = 0;
138 115
139 while (chan->dma.ib_free < count) { 116 while (chan->dma.ib_free < count) {
140 uint32_t get = nvchan_rd32(chan, 0x88); 117 uint32_t get = nv_ro32(chan->object, 0x88);
141 if (get != prev_get) { 118 if (get != prev_get) {
142 prev_get = get; 119 prev_get = get;
143 cnt = 0; 120 cnt = 0;
@@ -248,7 +225,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
248 * instruct the GPU to jump back to the start right 225 * instruct the GPU to jump back to the start right
249 * after processing the currently pending commands. 226 * after processing the currently pending commands.
250 */ 227 */
251 OUT_RING(chan, chan->pushbuf_base | 0x20000000); 228 OUT_RING(chan, chan->push.vma.offset | 0x20000000);
252 229
253 /* wait for GET to depart from the skips area. 230 /* wait for GET to depart from the skips area.
254 * prevents writing GET==PUT and causing a race 231 * prevents writing GET==PUT and causing a race
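
One detail of nv50_dma_push() above that is easy to miss: each indirect-buffer entry is two words, the low half of the push buffer's GPU virtual address followed by the high bits OR'd with the push length (in bytes, as FIRE_RING passes (cur - put) << 2) shifted into bits 8 and up. A sketch of that packing (function name ours):

#include <stdio.h>
#include <stdint.h>

static void ib_entry(uint64_t offset, uint32_t length, uint32_t out[2])
{
	out[0] = (uint32_t)offset;                       /* lower_32_bits() */
	out[1] = (uint32_t)(offset >> 32) | (length << 8);
}

int main(void)
{
	uint32_t e[2];

	ib_entry(0x100002000ull, 0x40, e);
	printf("0x%08x 0x%08x\n", e[0], e[1]);  /* 0x00002000 0x00004001 */
	return 0;
}
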
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 8db68be9544f..5c2e22932d1c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -27,10 +27,10 @@
27#ifndef __NOUVEAU_DMA_H__ 27#ifndef __NOUVEAU_DMA_H__
28#define __NOUVEAU_DMA_H__ 28#define __NOUVEAU_DMA_H__
29 29
30#ifndef NOUVEAU_DMA_DEBUG 30#include "nouveau_bo.h"
31#define NOUVEAU_DMA_DEBUG 0 31#include "nouveau_chan.h"
32#endif
33 32
33int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
34void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *, 34void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
35 int delta, int length); 35 int delta, int length);
36 36
@@ -116,12 +116,7 @@ RING_SPACE(struct nouveau_channel *chan, int size)
116static inline void 116static inline void
117OUT_RING(struct nouveau_channel *chan, int data) 117OUT_RING(struct nouveau_channel *chan, int data)
118{ 118{
119 if (NOUVEAU_DMA_DEBUG) { 119 nouveau_bo_wr32(chan->push.buffer, chan->dma.cur++, data);
120 NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
121 chan->id, chan->dma.cur << 2, data);
122 }
123
124 nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
125} 120}
126 121
127extern void 122extern void
@@ -159,24 +154,19 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
159 154
160#define WRITE_PUT(val) do { \ 155#define WRITE_PUT(val) do { \
161 DRM_MEMORYBARRIER(); \ 156 DRM_MEMORYBARRIER(); \
162 nouveau_bo_rd32(chan->pushbuf_bo, 0); \ 157 nouveau_bo_rd32(chan->push.buffer, 0); \
163 nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base); \ 158 nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
164} while (0) 159} while (0)
165 160
166static inline void 161static inline void
167FIRE_RING(struct nouveau_channel *chan) 162FIRE_RING(struct nouveau_channel *chan)
168{ 163{
169 if (NOUVEAU_DMA_DEBUG) {
170 NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
171 chan->id, chan->dma.cur << 2);
172 }
173
174 if (chan->dma.cur == chan->dma.put) 164 if (chan->dma.cur == chan->dma.put)
175 return; 165 return;
176 chan->accel_done = true; 166 chan->accel_done = true;
177 167
178 if (chan->dma.ib_max) { 168 if (chan->dma.ib_max) {
179 nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2, 169 nv50_dma_push(chan, chan->push.buffer, chan->dma.put << 2,
180 (chan->dma.cur - chan->dma.put) << 2); 170 (chan->dma.cur - chan->dma.put) << 2);
181 } else { 171 } else {
182 WRITE_PUT(chan->dma.cur); 172 WRITE_PUT(chan->dma.cur);
@@ -191,4 +181,31 @@ WIND_RING(struct nouveau_channel *chan)
191 chan->dma.cur = chan->dma.put; 181 chan->dma.cur = chan->dma.put;
192} 182}
193 183
184/* FIFO methods */
185#define NV01_SUBCHAN_OBJECT 0x00000000
186#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH 0x00000010
187#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_LOW 0x00000014
188#define NV84_SUBCHAN_SEMAPHORE_SEQUENCE 0x00000018
189#define NV84_SUBCHAN_SEMAPHORE_TRIGGER 0x0000001c
190#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001
191#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
192#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
193#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
194#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
195#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
196#define NV10_SUBCHAN_REF_CNT 0x00000050
197#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
198#define NV11_SUBCHAN_DMA_SEMAPHORE 0x00000060
199#define NV11_SUBCHAN_SEMAPHORE_OFFSET 0x00000064
200#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE 0x00000068
201#define NV11_SUBCHAN_SEMAPHORE_RELEASE 0x0000006c
202#define NV40_SUBCHAN_YIELD 0x00000080
203
204/* NV_SW object class */
205#define NV_SW_DMA_VBLSEM 0x0000018c
206#define NV_SW_VBLSEM_OFFSET 0x00000400
207#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
208#define NV_SW_VBLSEM_RELEASE 0x00000408
209#define NV_SW_PAGE_FLIP 0x00000500
210
194#endif 211#endif
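
The OUT_RING()/FIRE_RING() pair above is classic producer-side ring bookkeeping: writes accumulate at dma.cur, and FIRE_RING publishes everything between dma.put and dma.cur in one shot (via an IB entry on NV50+, or by bumping the PUT register otherwise), then advances put. A toy model of that flow (structure and names ours):

#include <stdio.h>

struct ring { unsigned int put, cur; unsigned int buf[64]; };

static void out_ring(struct ring *r, unsigned int data)
{
	r->buf[r->cur++] = data;         /* staged, not yet visible to HW */
}

static void fire_ring(struct ring *r)
{
	if (r->cur == r->put)
		return;                  /* nothing staged */
	printf("push %u dwords at %u\n", r->cur - r->put, r->put);
	r->put = r->cur;                 /* hardware now owns the span */
}

int main(void)
{
	struct ring r = { 0 };

	out_ring(&r, 0x00000000);
	out_ring(&r, 0xdeadbeef);
	fire_ring(&r);
	return 0;
}
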
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 898e5e32293c..978a108ba7a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -23,164 +23,37 @@
23 */ 23 */
24 24
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include <drm/drm_dp_helper.h>
26 27
27#include "nouveau_drv.h" 28#include "nouveau_drm.h"
28#include "nouveau_i2c.h"
29#include "nouveau_connector.h" 29#include "nouveau_connector.h"
30#include "nouveau_encoder.h" 30#include "nouveau_encoder.h"
31#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
32#include "nouveau_gpio.h"
33 32
34/****************************************************************************** 33#include <subdev/gpio.h>
35 * aux channel util functions 34#include <subdev/i2c.h>
36 *****************************************************************************/
37#define AUX_DBG(fmt, args...) do { \
38 if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_AUXCH) { \
39 NV_PRINTK(KERN_DEBUG, dev, "AUXCH(%d): " fmt, ch, ##args); \
40 } \
41} while (0)
42#define AUX_ERR(fmt, args...) NV_ERROR(dev, "AUXCH(%d): " fmt, ch, ##args)
43
44static void
45auxch_fini(struct drm_device *dev, int ch)
46{
47 nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
48}
49
50static int
51auxch_init(struct drm_device *dev, int ch)
52{
53 const u32 unksel = 1; /* nfi which to use, or if it matters.. */
54 const u32 ureq = unksel ? 0x00100000 : 0x00200000;
55 const u32 urep = unksel ? 0x01000000 : 0x02000000;
56 u32 ctrl, timeout;
57
58 /* wait up to 1ms for any previous transaction to be done... */
59 timeout = 1000;
60 do {
61 ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
62 udelay(1);
63 if (!timeout--) {
64 AUX_ERR("begin idle timeout 0x%08x", ctrl);
65 return -EBUSY;
66 }
67 } while (ctrl & 0x03010000);
68
69 /* set some magic, and wait up to 1ms for it to appear */
70 nv_mask(dev, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
71 timeout = 1000;
72 do {
73 ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
74 udelay(1);
75 if (!timeout--) {
76 AUX_ERR("magic wait 0x%08x\n", ctrl);
77 auxch_fini(dev, ch);
78 return -EBUSY;
79 }
80 } while ((ctrl & 0x03000000) != urep);
81
82 return 0;
83}
84
85static int
86auxch_tx(struct drm_device *dev, int ch, u8 type, u32 addr, u8 *data, u8 size)
87{
88 u32 ctrl, stat, timeout, retries;
89 u32 xbuf[4] = {};
90 int ret, i;
91
92 AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
93
94 ret = auxch_init(dev, ch);
95 if (ret)
96 goto out;
97
98 stat = nv_rd32(dev, 0x00e4e8 + (ch * 0x50));
99 if (!(stat & 0x10000000)) {
100 AUX_DBG("sink not detected\n");
101 ret = -ENXIO;
102 goto out;
103 }
104
105 if (!(type & 1)) {
106 memcpy(xbuf, data, size);
107 for (i = 0; i < 16; i += 4) {
108 AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
109 nv_wr32(dev, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
110 }
111 }
112
113 ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
114 ctrl &= ~0x0001f0ff;
115 ctrl |= type << 12;
116 ctrl |= size - 1;
117 nv_wr32(dev, 0x00e4e0 + (ch * 0x50), addr);
118
119 /* retry transaction a number of times on failure... */
120 ret = -EREMOTEIO;
121 for (retries = 0; retries < 32; retries++) {
122 /* reset, and delay a while if this is a retry */
123 nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
124 nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
125 if (retries)
126 udelay(400);
127
128 /* transaction request, wait up to 1ms for it to complete */
129 nv_wr32(dev, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
130
131 timeout = 1000;
132 do {
133 ctrl = nv_rd32(dev, 0x00e4e4 + (ch * 0x50));
134 udelay(1);
135 if (!timeout--) {
136 AUX_ERR("tx req timeout 0x%08x\n", ctrl);
137 goto out;
138 }
139 } while (ctrl & 0x00010000);
140
141 /* read status, and check if transaction completed ok */
142 stat = nv_mask(dev, 0x00e4e8 + (ch * 0x50), 0, 0);
143 if (!(stat & 0x000f0f00)) {
144 ret = 0;
145 break;
146 }
147
148 AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
149 }
150
151 if (type & 1) {
152 for (i = 0; i < 16; i += 4) {
153 xbuf[i / 4] = nv_rd32(dev, 0x00e4d0 + (ch * 0x50) + i);
154 AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
155 }
156 memcpy(data, xbuf, size);
157 }
158
159out:
160 auxch_fini(dev, ch);
161 return ret;
162}
163 35
164u8 * 36u8 *
165nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry) 37nouveau_dp_bios_data(struct drm_device *dev, struct dcb_output *dcb, u8 **entry)
166{ 38{
39 struct nouveau_drm *drm = nouveau_drm(dev);
167 struct bit_entry d; 40 struct bit_entry d;
168 u8 *table; 41 u8 *table;
169 int i; 42 int i;
170 43
171 if (bit_table(dev, 'd', &d)) { 44 if (bit_table(dev, 'd', &d)) {
172 NV_ERROR(dev, "BIT 'd' table not found\n"); 45 NV_ERROR(drm, "BIT 'd' table not found\n");
173 return NULL; 46 return NULL;
174 } 47 }
175 48
176 if (d.version != 1) { 49 if (d.version != 1) {
177 NV_ERROR(dev, "BIT 'd' table version %d unknown\n", d.version); 50 NV_ERROR(drm, "BIT 'd' table version %d unknown\n", d.version);
178 return NULL; 51 return NULL;
179 } 52 }
180 53
181 table = ROMPTR(dev, d.data[0]); 54 table = ROMPTR(dev, d.data[0]);
182 if (!table) { 55 if (!table) {
183 NV_ERROR(dev, "displayport table pointer invalid\n"); 56 NV_ERROR(drm, "displayport table pointer invalid\n");
184 return NULL; 57 return NULL;
185 } 58 }
186 59
@@ -191,7 +64,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
191 case 0x40: 64 case 0x40:
192 break; 65 break;
193 default: 66 default:
194 NV_ERROR(dev, "displayport table 0x%02x unknown\n", table[0]); 67 NV_ERROR(drm, "displayport table 0x%02x unknown\n", table[0]);
195 return NULL; 68 return NULL;
196 } 69 }
197 70
@@ -201,7 +74,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
201 return table; 74 return table;
202 } 75 }
203 76
204 NV_ERROR(dev, "displayport encoder table not found\n"); 77 NV_ERROR(drm, "displayport encoder table not found\n");
205 return NULL; 78 return NULL;
206} 79}
207 80
@@ -209,9 +82,9 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
209 * link training 82 * link training
210 *****************************************************************************/ 83 *****************************************************************************/
211struct dp_state { 84struct dp_state {
85 struct nouveau_i2c_port *auxch;
212 struct dp_train_func *func; 86 struct dp_train_func *func;
213 struct dcb_entry *dcb; 87 struct dcb_output *dcb;
214 int auxch;
215 int crtc; 88 int crtc;
216 u8 *dpcd; 89 u8 *dpcd;
217 int link_nr; 90 int link_nr;
@@ -223,9 +96,10 @@ struct dp_state {
223static void 96static void
224dp_set_link_config(struct drm_device *dev, struct dp_state *dp) 97dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
225{ 98{
99 struct nouveau_drm *drm = nouveau_drm(dev);
226 u8 sink[2]; 100 u8 sink[2];
227 101
228 NV_DEBUG_KMS(dev, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); 102 NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
229 103
230 /* set desired link configuration on the source */ 104 /* set desired link configuration on the source */
231 dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw, 105 dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw,
@@ -237,27 +111,29 @@ dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
237 if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP) 111 if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
238 sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 112 sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
239 113
240 auxch_tx(dev, dp->auxch, 8, DP_LINK_BW_SET, sink, 2); 114 nv_wraux(dp->auxch, DP_LINK_BW_SET, sink, 2);
241} 115}
242 116
243static void 117static void
244dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern) 118dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
245{ 119{
120 struct nouveau_drm *drm = nouveau_drm(dev);
246 u8 sink_tp; 121 u8 sink_tp;
247 122
248 NV_DEBUG_KMS(dev, "training pattern %d\n", pattern); 123 NV_DEBUG(drm, "training pattern %d\n", pattern);
249 124
250 dp->func->train_set(dev, dp->dcb, pattern); 125 dp->func->train_set(dev, dp->dcb, pattern);
251 126
252 auxch_tx(dev, dp->auxch, 9, DP_TRAINING_PATTERN_SET, &sink_tp, 1); 127 nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
253 sink_tp &= ~DP_TRAINING_PATTERN_MASK; 128 sink_tp &= ~DP_TRAINING_PATTERN_MASK;
254 sink_tp |= pattern; 129 sink_tp |= pattern;
255 auxch_tx(dev, dp->auxch, 8, DP_TRAINING_PATTERN_SET, &sink_tp, 1); 130 nv_wraux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
256} 131}
257 132
258static int 133static int
259dp_link_train_commit(struct drm_device *dev, struct dp_state *dp) 134dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
260{ 135{
136 struct nouveau_drm *drm = nouveau_drm(dev);
261 int i; 137 int i;
262 138
263 for (i = 0; i < dp->link_nr; i++) { 139 for (i = 0; i < dp->link_nr; i++) {
@@ -271,27 +147,26 @@ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
271 if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5) 147 if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5)
272 dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 148 dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
273 149
274 NV_DEBUG_KMS(dev, "config lane %d %02x\n", i, dp->conf[i]); 150 NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
275 dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre); 151 dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre);
276 } 152 }
277 153
278 return auxch_tx(dev, dp->auxch, 8, DP_TRAINING_LANE0_SET, dp->conf, 4); 154 return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
279} 155}
280 156
281static int 157static int
282dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay) 158dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay)
283{ 159{
160 struct nouveau_drm *drm = nouveau_drm(dev);
284 int ret; 161 int ret;
285 162
286 udelay(delay); 163 udelay(delay);
287 164
288 ret = auxch_tx(dev, dp->auxch, 9, DP_LANE0_1_STATUS, dp->stat, 6); 165 ret = nv_rdaux(dp->auxch, DP_LANE0_1_STATUS, dp->stat, 6);
289 if (ret) 166 if (ret)
290 return ret; 167 return ret;
291 168
292 NV_DEBUG_KMS(dev, "status %02x %02x %02x %02x %02x %02x\n", 169 NV_DEBUG(drm, "status %*ph\n", 6, dp->stat);
293 dp->stat[0], dp->stat[1], dp->stat[2], dp->stat[3],
294 dp->stat[4], dp->stat[5]);
295 return 0; 170 return 0;
296} 171}
297 172
@@ -409,7 +284,7 @@ dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
409 nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); 284 nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
410} 285}
411 286
412bool 287static bool
413nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate, 288nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
414 struct dp_train_func *func) 289 struct dp_train_func *func)
415{ 290{
@@ -418,19 +293,20 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
 	struct nouveau_connector *nv_connector =
 		nouveau_encoder_connector_get(nv_encoder);
 	struct drm_device *dev = encoder->dev;
-	struct nouveau_i2c_chan *auxch;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	const u32 bw_list[] = { 270000, 162000, 0 };
 	const u32 *link_bw = bw_list;
 	struct dp_state dp;
 
-	auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
-	if (!auxch)
+	dp.auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
+	if (!dp.auxch)
 		return false;
 
 	dp.func = func;
 	dp.dcb = nv_encoder->dcb;
 	dp.crtc = nv_crtc->index;
-	dp.auxch = auxch->drive;
 	dp.dpcd = nv_encoder->dp.dpcd;
 
 	/* adjust required bandwidth for 8B/10B coding overhead */
@@ -440,7 +316,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
 	 * we take during link training (DP_SET_POWER is one), we need
 	 * to ignore them for the moment to avoid races.
 	 */
-	nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false);
+	gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
 
 	/* enable down-spreading, if possible */
 	dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
@@ -483,7 +359,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
 	dp_link_train_fini(dev, &dp);
 
 	/* re-enable hotplug detect */
-	nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true);
+	gpio->irq(gpio, 0, nv_connector->hpd, 0xff, true);
 	return true;
 }
 
@@ -492,10 +368,12 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
 		struct dp_train_func *func)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct nouveau_i2c_chan *auxch;
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct nouveau_i2c_port *auxch;
 	u8 status;
 
-	auxch = nouveau_i2c_find(encoder->dev, nv_encoder->dcb->i2c_index);
+	auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
 	if (!auxch)
 		return;
 
@@ -504,27 +382,28 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
 	else
 		status = DP_SET_POWER_D3;
 
-	nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
+	nv_wraux(auxch, DP_SET_POWER, &status, 1);
 
 	if (mode == DRM_MODE_DPMS_ON)
 		nouveau_dp_link_train(encoder, datarate, func);
 }
 
 static void
-nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_chan *auxch,
+nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch,
 		     u8 *dpcd)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u8 buf[3];
 
 	if (!(dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
 		return;
 
-	if (!auxch_tx(dev, auxch->drive, 9, DP_SINK_OUI, buf, 3))
-		NV_DEBUG_KMS(dev, "Sink OUI: %02hx%02hx%02hx\n",
+	if (!nv_rdaux(auxch, DP_SINK_OUI, buf, 3))
+		NV_DEBUG(drm, "Sink OUI: %02hx%02hx%02hx\n",
			 buf[0], buf[1], buf[2]);
 
-	if (!auxch_tx(dev, auxch->drive, 9, DP_BRANCH_OUI, buf, 3))
-		NV_DEBUG_KMS(dev, "Branch OUI: %02hx%02hx%02hx\n",
+	if (!nv_rdaux(auxch, DP_BRANCH_OUI, buf, 3))
+		NV_DEBUG(drm, "Branch OUI: %02hx%02hx%02hx\n",
			 buf[0], buf[1], buf[2]);
 
 }
@@ -534,24 +413,26 @@ nouveau_dp_detect(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
-	struct nouveau_i2c_chan *auxch;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct nouveau_i2c_port *auxch;
 	u8 *dpcd = nv_encoder->dp.dpcd;
 	int ret;
 
-	auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+	auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
 	if (!auxch)
 		return false;
 
-	ret = auxch_tx(dev, auxch->drive, 9, DP_DPCD_REV, dpcd, 8);
+	ret = nv_rdaux(auxch, DP_DPCD_REV, dpcd, 8);
 	if (ret)
 		return false;
 
 	nv_encoder->dp.link_bw = 27000 * dpcd[1];
 	nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
 
-	NV_DEBUG_KMS(dev, "display: %dx%d dpcd 0x%02x\n",
+	NV_DEBUG(drm, "display: %dx%d dpcd 0x%02x\n",
		     nv_encoder->dp.link_nr, nv_encoder->dp.link_bw, dpcd[0]);
-	NV_DEBUG_KMS(dev, "encoder: %dx%d\n",
+	NV_DEBUG(drm, "encoder: %dx%d\n",
		     nv_encoder->dcb->dpconf.link_nr,
		     nv_encoder->dcb->dpconf.link_bw);
 
@@ -560,65 +441,10 @@ nouveau_dp_detect(struct drm_encoder *encoder)
 	if (nv_encoder->dcb->dpconf.link_bw < nv_encoder->dp.link_bw)
 		nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw;
 
-	NV_DEBUG_KMS(dev, "maximum: %dx%d\n",
+	NV_DEBUG(drm, "maximum: %dx%d\n",
		     nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
 
 	nouveau_dp_probe_oui(dev, auxch, dpcd);
 
 	return true;
 }
-
-int
-nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
-		 uint8_t *data, int data_nr)
-{
-	return auxch_tx(auxch->dev, auxch->drive, cmd, addr, data, data_nr);
-}
-
-static int
-nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
-{
-	struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adap;
-	struct i2c_msg *msg = msgs;
-	int ret, mcnt = num;
-
-	while (mcnt--) {
-		u8 remaining = msg->len;
-		u8 *ptr = msg->buf;
-
-		while (remaining) {
-			u8 cnt = (remaining > 16) ? 16 : remaining;
-			u8 cmd;
-
-			if (msg->flags & I2C_M_RD)
-				cmd = AUX_I2C_READ;
-			else
-				cmd = AUX_I2C_WRITE;
-
-			if (mcnt || remaining > 16)
-				cmd |= AUX_I2C_MOT;
-
-			ret = nouveau_dp_auxch(auxch, cmd, msg->addr, ptr, cnt);
-			if (ret < 0)
-				return ret;
-
-			ptr += cnt;
-			remaining -= cnt;
-		}
-
-		msg++;
-	}
-
-	return num;
-}
-
-static u32
-nouveau_dp_i2c_func(struct i2c_adapter *adap)
-{
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-const struct i2c_algorithm nouveau_dp_i2c_algo = {
-	.master_xfer = nouveau_dp_i2c_xfer,
-	.functionality = nouveau_dp_i2c_func
-};
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
new file mode 100644
index 000000000000..ccae8c26ae2b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -0,0 +1,693 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/console.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28
29#include <core/device.h>
30#include <core/client.h>
31#include <core/gpuobj.h>
32#include <core/class.h>
33
34#include <subdev/device.h>
35#include <subdev/vm.h>
36
37#include "nouveau_drm.h"
38#include "nouveau_irq.h"
39#include "nouveau_dma.h"
40#include "nouveau_ttm.h"
41#include "nouveau_gem.h"
42#include "nouveau_agp.h"
43#include "nouveau_vga.h"
44#include "nouveau_pm.h"
45#include "nouveau_acpi.h"
46#include "nouveau_bios.h"
47#include "nouveau_ioctl.h"
48#include "nouveau_abi16.h"
49#include "nouveau_fbcon.h"
50#include "nouveau_fence.h"
51
52#include "nouveau_ttm.h"
53
54MODULE_PARM_DESC(config, "option string to pass to driver core");
55static char *nouveau_config;
56module_param_named(config, nouveau_config, charp, 0400);
57
58MODULE_PARM_DESC(debug, "debug string to pass to driver core");
59static char *nouveau_debug;
60module_param_named(debug, nouveau_debug, charp, 0400);
61
62MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration");
63static int nouveau_noaccel = 0;
64module_param_named(noaccel, nouveau_noaccel, int, 0400);
65
66MODULE_PARM_DESC(modeset, "enable driver");
67static int nouveau_modeset = -1;
68module_param_named(modeset, nouveau_modeset, int, 0400);
69
70static struct drm_driver driver;
71
72static u64
73nouveau_name(struct pci_dev *pdev)
74{
75 u64 name = (u64)pci_domain_nr(pdev->bus) << 32;
76 name |= pdev->bus->number << 16;
77 name |= PCI_SLOT(pdev->devfn) << 8;
78 return name | PCI_FUNC(pdev->devfn);
79}
80
81static int
82nouveau_cli_create(struct pci_dev *pdev, const char *name,
83 int size, void **pcli)
84{
85 struct nouveau_cli *cli;
86 int ret;
87
88 ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
89 nouveau_debug, size, pcli);
90 cli = *pcli;
91 if (ret)
92 return ret;
93
94 mutex_init(&cli->mutex);
95 return 0;
96}
97
98static void
99nouveau_cli_destroy(struct nouveau_cli *cli)
100{
101 struct nouveau_object *client = nv_object(cli);
102 nouveau_vm_ref(NULL, &cli->base.vm, NULL);
103 nouveau_client_fini(&cli->base, false);
104 atomic_set(&client->refcount, 1);
105 nouveau_object_ref(NULL, &client);
106}
107
108static void
109nouveau_accel_fini(struct nouveau_drm *drm)
110{
111 nouveau_gpuobj_ref(NULL, &drm->notify);
112 nouveau_channel_del(&drm->channel);
113 nouveau_channel_del(&drm->cechan);
114 if (drm->fence)
115 nouveau_fence(drm)->dtor(drm);
116}
117
118static void
119nouveau_accel_init(struct nouveau_drm *drm)
120{
121 struct nouveau_device *device = nv_device(drm->device);
122 struct nouveau_object *object;
123 u32 arg0, arg1;
124 int ret;
125
126 if (nouveau_noaccel)
127 return;
128
129 /* initialise synchronisation routines */
130 if (device->card_type < NV_10) ret = nv04_fence_create(drm);
131 else if (device->chipset < 0x84) ret = nv10_fence_create(drm);
132 else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
133 else ret = nvc0_fence_create(drm);
134 if (ret) {
135 NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
136 nouveau_accel_fini(drm);
137 return;
138 }
139
140 if (device->card_type >= NV_E0) {
141 ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
142 NVDRM_CHAN + 1,
143 NVE0_CHANNEL_IND_ENGINE_CE0 |
144 NVE0_CHANNEL_IND_ENGINE_CE1, 0,
145 &drm->cechan);
146 if (ret)
147 NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
148
149 arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
150 arg1 = 0;
151 } else {
152 arg0 = NvDmaFB;
153 arg1 = NvDmaTT;
154 }
155
156 ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN,
157 arg0, arg1, &drm->channel);
158 if (ret) {
159 NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
160 nouveau_accel_fini(drm);
161 return;
162 }
163
164 if (device->card_type < NV_C0) {
165 ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
166 &drm->notify);
167 if (ret) {
168 NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
169 nouveau_accel_fini(drm);
170 return;
171 }
172
173 ret = nouveau_object_new(nv_object(drm),
174 drm->channel->handle, NvNotify0,
175 0x003d, &(struct nv_dma_class) {
176 .flags = NV_DMA_TARGET_VRAM |
177 NV_DMA_ACCESS_RDWR,
178 .start = drm->notify->addr,
179 .limit = drm->notify->addr + 31
180 }, sizeof(struct nv_dma_class),
181 &object);
182 if (ret) {
183 nouveau_accel_fini(drm);
184 return;
185 }
186 }
187
188
189 nouveau_bo_move_init(drm);
190}
191
192static int __devinit
193nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
194{
195 struct nouveau_device *device;
196 struct apertures_struct *aper;
197 bool boot = false;
198 int ret;
199
200 /* remove conflicting drivers (vesafb, efifb etc) */
201 aper = alloc_apertures(3);
202 if (!aper)
203 return -ENOMEM;
204
205 aper->ranges[0].base = pci_resource_start(pdev, 1);
206 aper->ranges[0].size = pci_resource_len(pdev, 1);
207 aper->count = 1;
208
209 if (pci_resource_len(pdev, 2)) {
210 aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
211 aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
212 aper->count++;
213 }
214
215 if (pci_resource_len(pdev, 3)) {
216 aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
217 aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
218 aper->count++;
219 }
220
221#ifdef CONFIG_X86
222 boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
223#endif
224 remove_conflicting_framebuffers(aper, "nouveaufb", boot);
225
226 ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev),
227 nouveau_config, nouveau_debug, &device);
228 if (ret)
229 return ret;
230
231 pci_set_master(pdev);
232
233 ret = drm_get_pci_dev(pdev, pent, &driver);
234 if (ret) {
235 nouveau_object_ref(NULL, (struct nouveau_object **)&device);
236 return ret;
237 }
238
239 return 0;
240}
241
242static int
243nouveau_drm_load(struct drm_device *dev, unsigned long flags)
244{
245 struct pci_dev *pdev = dev->pdev;
246 struct nouveau_device *device;
247 struct nouveau_drm *drm;
248 int ret;
249
250 ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
251 if (ret)
252 return ret;
253
254 dev->dev_private = drm;
255 drm->dev = dev;
256
257 INIT_LIST_HEAD(&drm->clients);
258 spin_lock_init(&drm->tile.lock);
259
260 /* make sure AGP controller is in a consistent state before we
261 * (possibly) execute vbios init tables (see nouveau_agp.h)
262 */
263 if (drm_pci_device_is_agp(dev) && dev->agp) {
264 /* dummy device object, doesn't init anything, but allows
265 * agp code access to registers
266 */
267 ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT,
268 NVDRM_DEVICE, 0x0080,
269 &(struct nv_device_class) {
270 .device = ~0,
271 .disable =
272 ~(NV_DEVICE_DISABLE_MMIO |
273 NV_DEVICE_DISABLE_IDENTIFY),
274 .debug0 = ~0,
275 }, sizeof(struct nv_device_class),
276 &drm->device);
277 if (ret)
278 goto fail_device;
279
280 nouveau_agp_reset(drm);
281 nouveau_object_del(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE);
282 }
283
284 ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE,
285 0x0080, &(struct nv_device_class) {
286 .device = ~0,
287 .disable = 0,
288 .debug0 = 0,
289 }, sizeof(struct nv_device_class),
290 &drm->device);
291 if (ret)
292 goto fail_device;
293
294 /* workaround an odd issue on nvc1 by disabling the device's
295 * nosnoop capability. hopefully won't cause issues until a
296 * better fix is found - assuming there is one...
297 */
298 device = nv_device(drm->device);
299 if (nv_device(drm->device)->chipset == 0xc1)
300 nv_mask(device, 0x00088080, 0x00000800, 0x00000000);
301
302 nouveau_vga_init(drm);
303 nouveau_agp_init(drm);
304
305 if (device->card_type >= NV_50) {
306 ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
307 0x1000, &drm->client.base.vm);
308 if (ret)
309 goto fail_device;
310 }
311
312 ret = nouveau_ttm_init(drm);
313 if (ret)
314 goto fail_ttm;
315
316 ret = nouveau_bios_init(dev);
317 if (ret)
318 goto fail_bios;
319
320 ret = nouveau_irq_init(dev);
321 if (ret)
322 goto fail_irq;
323
324 ret = nouveau_display_create(dev);
325 if (ret)
326 goto fail_dispctor;
327
328 if (dev->mode_config.num_crtc) {
329 ret = nouveau_display_init(dev);
330 if (ret)
331 goto fail_dispinit;
332 }
333
334 nouveau_pm_init(dev);
335
336 nouveau_accel_init(drm);
337 nouveau_fbcon_init(dev);
338 return 0;
339
340fail_dispinit:
341 nouveau_display_destroy(dev);
342fail_dispctor:
343 nouveau_irq_fini(dev);
344fail_irq:
345 nouveau_bios_takedown(dev);
346fail_bios:
347 nouveau_ttm_fini(drm);
348fail_ttm:
349 nouveau_agp_fini(drm);
350 nouveau_vga_fini(drm);
351fail_device:
352 nouveau_cli_destroy(&drm->client);
353 return ret;
354}
355
356static int
357nouveau_drm_unload(struct drm_device *dev)
358{
359 struct nouveau_drm *drm = nouveau_drm(dev);
360
361 nouveau_fbcon_fini(dev);
362 nouveau_accel_fini(drm);
363
364 nouveau_pm_fini(dev);
365
366 nouveau_display_fini(dev);
367 nouveau_display_destroy(dev);
368
369 nouveau_irq_fini(dev);
370 nouveau_bios_takedown(dev);
371
372 nouveau_ttm_fini(drm);
373 nouveau_agp_fini(drm);
374 nouveau_vga_fini(drm);
375
376 nouveau_cli_destroy(&drm->client);
377 return 0;
378}
379
380static void
381nouveau_drm_remove(struct pci_dev *pdev)
382{
383 struct drm_device *dev = pci_get_drvdata(pdev);
384 struct nouveau_drm *drm = nouveau_drm(dev);
385 struct nouveau_object *device;
386
387 device = drm->client.base.device;
388 drm_put_dev(dev);
389
390 nouveau_object_ref(NULL, &device);
391 nouveau_object_debug();
392}
393
394int
395nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
396{
397 struct drm_device *dev = pci_get_drvdata(pdev);
398 struct nouveau_drm *drm = nouveau_drm(dev);
399 struct nouveau_cli *cli;
400 int ret;
401
402 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
403 pm_state.event == PM_EVENT_PRETHAW)
404 return 0;
405
406 NV_INFO(drm, "suspending fbcon...\n");
407 nouveau_fbcon_set_suspend(dev, 1);
408
409 NV_INFO(drm, "suspending display...\n");
410 ret = nouveau_display_suspend(dev);
411 if (ret)
412 return ret;
413
414 NV_INFO(drm, "evicting buffers...\n");
415 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
416
417 if (drm->fence && nouveau_fence(drm)->suspend) {
418 if (!nouveau_fence(drm)->suspend(drm))
419 return -ENOMEM;
420 }
421
422 NV_INFO(drm, "suspending client object trees...\n");
423 list_for_each_entry(cli, &drm->clients, head) {
424 ret = nouveau_client_fini(&cli->base, true);
425 if (ret)
426 goto fail_client;
427 }
428
429 ret = nouveau_client_fini(&drm->client.base, true);
430 if (ret)
431 goto fail_client;
432
433 nouveau_agp_fini(drm);
434
435 pci_save_state(pdev);
436 if (pm_state.event == PM_EVENT_SUSPEND) {
437 pci_disable_device(pdev);
438 pci_set_power_state(pdev, PCI_D3hot);
439 }
440
441 return 0;
442
443fail_client:
444 list_for_each_entry_continue_reverse(cli, &drm->clients, head) {
445 nouveau_client_init(&cli->base);
446 }
447
448 NV_INFO(drm, "resuming display...\n");
449 nouveau_display_resume(dev);
450 return ret;
451}
452
453int
454nouveau_drm_resume(struct pci_dev *pdev)
455{
456 struct drm_device *dev = pci_get_drvdata(pdev);
457 struct nouveau_drm *drm = nouveau_drm(dev);
458 struct nouveau_cli *cli;
459 int ret;
460
461 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
462 return 0;
463
464 NV_INFO(drm, "re-enabling device...\n");
465 pci_set_power_state(pdev, PCI_D0);
466 pci_restore_state(pdev);
467 ret = pci_enable_device(pdev);
468 if (ret)
469 return ret;
470 pci_set_master(pdev);
471
472 nouveau_agp_reset(drm);
473
474 NV_INFO(drm, "resuming client object trees...\n");
475 nouveau_client_init(&drm->client.base);
476 nouveau_agp_init(drm);
477
478 list_for_each_entry(cli, &drm->clients, head) {
479 nouveau_client_init(&cli->base);
480 }
481
482 if (drm->fence && nouveau_fence(drm)->resume)
483 nouveau_fence(drm)->resume(drm);
484
485 nouveau_run_vbios_init(dev);
486 nouveau_irq_postinstall(dev);
487 nouveau_pm_resume(dev);
488
489 NV_INFO(drm, "resuming display...\n");
490 nouveau_display_resume(dev);
491 return 0;
492}
493
494static int
495nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
496{
497 struct pci_dev *pdev = dev->pdev;
498 struct nouveau_drm *drm = nouveau_drm(dev);
499 struct nouveau_cli *cli;
500 char name[16];
501 int ret;
502
503 snprintf(name, sizeof(name), "%d", pid_nr(fpriv->pid));
504
505 ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
506 if (ret)
507 return ret;
508
509 if (nv_device(drm->device)->card_type >= NV_50) {
510 ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
511 0x1000, &cli->base.vm);
512 if (ret) {
513 nouveau_cli_destroy(cli);
514 return ret;
515 }
516 }
517
518 fpriv->driver_priv = cli;
519
520 mutex_lock(&drm->client.mutex);
521 list_add(&cli->head, &drm->clients);
522 mutex_unlock(&drm->client.mutex);
523 return 0;
524}
525
526static void
527nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
528{
529 struct nouveau_cli *cli = nouveau_cli(fpriv);
530 struct nouveau_drm *drm = nouveau_drm(dev);
531
532 if (cli->abi16)
533 nouveau_abi16_fini(cli->abi16);
534
535 mutex_lock(&drm->client.mutex);
536 list_del(&cli->head);
537 mutex_unlock(&drm->client.mutex);
538}
539
540static void
541nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
542{
543 struct nouveau_cli *cli = nouveau_cli(fpriv);
544 nouveau_cli_destroy(cli);
545}
546
547static struct drm_ioctl_desc
548nouveau_ioctls[] = {
549 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
550 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
551 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH),
552 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH),
553 DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
554 DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH),
555 DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
556 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
557 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
558 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
559 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
560 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
561};
562
563static const struct file_operations
564nouveau_driver_fops = {
565 .owner = THIS_MODULE,
566 .open = drm_open,
567 .release = drm_release,
568 .unlocked_ioctl = drm_ioctl,
569 .mmap = nouveau_ttm_mmap,
570 .poll = drm_poll,
571 .fasync = drm_fasync,
572 .read = drm_read,
573#if defined(CONFIG_COMPAT)
574 .compat_ioctl = nouveau_compat_ioctl,
575#endif
576 .llseek = noop_llseek,
577};
578
579static struct drm_driver
580driver = {
581 .driver_features =
582 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
583 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
584 DRIVER_MODESET | DRIVER_PRIME,
585
586 .load = nouveau_drm_load,
587 .unload = nouveau_drm_unload,
588 .open = nouveau_drm_open,
589 .preclose = nouveau_drm_preclose,
590 .postclose = nouveau_drm_postclose,
591 .lastclose = nouveau_vga_lastclose,
592
593 .irq_preinstall = nouveau_irq_preinstall,
594 .irq_postinstall = nouveau_irq_postinstall,
595 .irq_uninstall = nouveau_irq_uninstall,
596 .irq_handler = nouveau_irq_handler,
597
598 .get_vblank_counter = drm_vblank_count,
599 .enable_vblank = nouveau_vblank_enable,
600 .disable_vblank = nouveau_vblank_disable,
601
602 .ioctls = nouveau_ioctls,
603 .fops = &nouveau_driver_fops,
604
605 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
606 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
607 .gem_prime_export = nouveau_gem_prime_export,
608 .gem_prime_import = nouveau_gem_prime_import,
609
610 .gem_init_object = nouveau_gem_object_new,
611 .gem_free_object = nouveau_gem_object_del,
612 .gem_open_object = nouveau_gem_object_open,
613 .gem_close_object = nouveau_gem_object_close,
614
615 .dumb_create = nouveau_display_dumb_create,
616 .dumb_map_offset = nouveau_display_dumb_map_offset,
617 .dumb_destroy = nouveau_display_dumb_destroy,
618
619 .name = DRIVER_NAME,
620 .desc = DRIVER_DESC,
621#ifdef GIT_REVISION
622 .date = GIT_REVISION,
623#else
624 .date = DRIVER_DATE,
625#endif
626 .major = DRIVER_MAJOR,
627 .minor = DRIVER_MINOR,
628 .patchlevel = DRIVER_PATCHLEVEL,
629};
630
631static struct pci_device_id
632nouveau_drm_pci_table[] = {
633 {
634 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
635 .class = PCI_BASE_CLASS_DISPLAY << 16,
636 .class_mask = 0xff << 16,
637 },
638 {
639 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
640 .class = PCI_BASE_CLASS_DISPLAY << 16,
641 .class_mask = 0xff << 16,
642 },
643 {}
644};
645
646static struct pci_driver
647nouveau_drm_pci_driver = {
648 .name = "nouveau",
649 .id_table = nouveau_drm_pci_table,
650 .probe = nouveau_drm_probe,
651 .remove = nouveau_drm_remove,
652 .suspend = nouveau_drm_suspend,
653 .resume = nouveau_drm_resume,
654};
655
656static int __init
657nouveau_drm_init(void)
658{
659 driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
660
661 if (nouveau_modeset == -1) {
662#ifdef CONFIG_VGA_CONSOLE
663 if (vgacon_text_force())
664 nouveau_modeset = 0;
665 else
666#endif
667 nouveau_modeset = 1;
668 }
669
670 if (!nouveau_modeset)
671 return 0;
672
673 nouveau_register_dsm_handler();
674 return drm_pci_init(&driver, &nouveau_drm_pci_driver);
675}
676
677static void __exit
678nouveau_drm_exit(void)
679{
680 if (!nouveau_modeset)
681 return;
682
683 drm_pci_exit(&driver, &nouveau_drm_pci_driver);
684 nouveau_unregister_dsm_handler();
685}
686
687module_init(nouveau_drm_init);
688module_exit(nouveau_drm_exit);
689
690MODULE_DEVICE_TABLE(pci, nouveau_drm_pci_table);
691MODULE_AUTHOR(DRIVER_AUTHOR);
692MODULE_DESCRIPTION(DRIVER_DESC);
693MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
new file mode 100644
index 000000000000..819471217546
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -0,0 +1,144 @@
1#ifndef __NOUVEAU_DRMCLI_H__
2#define __NOUVEAU_DRMCLI_H__
3
4#define DRIVER_AUTHOR "Nouveau Project"
5#define DRIVER_EMAIL "nouveau@lists.freedesktop.org"
6
7#define DRIVER_NAME "nouveau"
8#define DRIVER_DESC "nVidia Riva/TNT/GeForce/Quadro/Tesla"
9#define DRIVER_DATE "20120801"
10
11#define DRIVER_MAJOR 1
12#define DRIVER_MINOR 1
13#define DRIVER_PATCHLEVEL 0
14
15#include <core/client.h>
16
17#include <subdev/vm.h>
18
19#include <drmP.h>
20#include <drm/nouveau_drm.h>
21
22#include <drm/ttm/ttm_bo_api.h>
23#include <drm/ttm/ttm_bo_driver.h>
24#include <drm/ttm/ttm_placement.h>
25#include <drm/ttm/ttm_memory.h>
26#include <drm/ttm/ttm_module.h>
27#include <drm/ttm/ttm_page_alloc.h>
28
29struct nouveau_channel;
30
31#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
32
33#include "nouveau_fence.h"
34#include "nouveau_bios.h"
35
36struct nouveau_drm_tile {
37 struct nouveau_fence *fence;
38 bool used;
39};
40
41enum nouveau_drm_handle {
42 NVDRM_CLIENT = 0xffffffff,
43 NVDRM_DEVICE = 0xdddddddd,
44 NVDRM_PUSH = 0xbbbb0000, /* |= client chid */
45 NVDRM_CHAN = 0xcccc0000, /* |= client chid */
46};
47
48struct nouveau_cli {
49 struct nouveau_client base;
50 struct list_head head;
51 struct mutex mutex;
52 void *abi16;
53};
54
55static inline struct nouveau_cli *
56nouveau_cli(struct drm_file *fpriv)
57{
58 return fpriv ? fpriv->driver_priv : NULL;
59}
60
61struct nouveau_drm {
62 struct nouveau_cli client;
63 struct drm_device *dev;
64
65 struct nouveau_object *device;
66 struct list_head clients;
67
68 struct {
69 enum {
70 UNKNOWN = 0,
71 DISABLE = 1,
72 ENABLED = 2
73 } stat;
74 u32 base;
75 u32 size;
76 } agp;
77
78 /* TTM interface support */
79 struct {
80 struct drm_global_reference mem_global_ref;
81 struct ttm_bo_global_ref bo_global_ref;
82 struct ttm_bo_device bdev;
83 atomic_t validate_sequence;
84 int (*move)(struct nouveau_channel *,
85 struct ttm_buffer_object *,
86 struct ttm_mem_reg *, struct ttm_mem_reg *);
87 int mtrr;
88 } ttm;
89
90 /* GEM interface support */
91 struct {
92 u64 vram_available;
93 u64 gart_available;
94 } gem;
95
96 /* synchronisation */
97 void *fence;
98
99 /* context for accelerated drm-internal operations */
100 struct nouveau_channel *cechan;
101 struct nouveau_channel *channel;
102 struct nouveau_gpuobj *notify;
103 struct nouveau_fbdev *fbcon;
104
105 /* nv10-nv40 tiling regions */
106 struct {
107 struct nouveau_drm_tile reg[15];
108 spinlock_t lock;
109 } tile;
110
111 /* modesetting */
112 struct nvbios vbios;
113 struct nouveau_display *display;
114 struct backlight_device *backlight;
115
116 /* power management */
117 struct nouveau_pm *pm;
118};
119
120static inline struct nouveau_drm *
121nouveau_drm(struct drm_device *dev)
122{
123 return dev->dev_private;
124}
125
126static inline struct nouveau_device *
127nouveau_dev(struct drm_device *dev)
128{
129 return nv_device(nouveau_drm(dev)->device);
130}
131
132int nouveau_drm_suspend(struct pci_dev *, pm_message_t);
133int nouveau_drm_resume(struct pci_dev *);
134
135#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
136#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
137#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
138#define NV_INFO(cli, fmt, args...) nv_info((cli), fmt, ##args)
139#define NV_DEBUG(cli, fmt, args...) do { \
140 if (drm_debug & DRM_UT_DRIVER) \
141 nv_info((cli), fmt, ##args); \
142} while (0)
143
144#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
deleted file mode 100644
index 8b5e558d7c73..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ /dev/null
@@ -1,512 +0,0 @@
1/*
2 * Copyright 2005 Stephane Marchesin.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/console.h>
26#include <linux/module.h>
27
28#include <drm/drmP.h>
29#include <drm/drm_crtc_helper.h>
30#include "nouveau_drv.h"
31#include "nouveau_abi16.h"
32#include "nouveau_hw.h"
33#include "nouveau_fb.h"
34#include "nouveau_fbcon.h"
35#include "nouveau_pm.h"
36#include "nouveau_fifo.h"
37#include "nv50_display.h"
38
39#include <drm/drm_pciids.h>
40
41MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
42int nouveau_agpmode = -1;
43module_param_named(agpmode, nouveau_agpmode, int, 0400);
44
45MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
46int nouveau_modeset = -1;
47module_param_named(modeset, nouveau_modeset, int, 0400);
48
49MODULE_PARM_DESC(vbios, "Override default VBIOS location");
50char *nouveau_vbios;
51module_param_named(vbios, nouveau_vbios, charp, 0400);
52
53MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM");
54int nouveau_vram_pushbuf;
55module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
56
57MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
58int nouveau_vram_notify = 0;
59module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
60
61MODULE_PARM_DESC(vram_type, "Override detected VRAM type");
62char *nouveau_vram_type;
63module_param_named(vram_type, nouveau_vram_type, charp, 0400);
64
65MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
66int nouveau_duallink = 1;
67module_param_named(duallink, nouveau_duallink, int, 0400);
68
69MODULE_PARM_DESC(uscript_lvds, "LVDS output script table ID (>=GeForce 8)");
70int nouveau_uscript_lvds = -1;
71module_param_named(uscript_lvds, nouveau_uscript_lvds, int, 0400);
72
73MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
74int nouveau_uscript_tmds = -1;
75module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
76
77MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
78int nouveau_ignorelid = 0;
79module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
80
81MODULE_PARM_DESC(noaccel, "Disable all acceleration");
82int nouveau_noaccel = -1;
83module_param_named(noaccel, nouveau_noaccel, int, 0400);
84
85MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
86int nouveau_nofbaccel = 0;
87module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
88
89MODULE_PARM_DESC(force_post, "Force POST");
90int nouveau_force_post = 0;
91module_param_named(force_post, nouveau_force_post, int, 0400);
92
93MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
94int nouveau_override_conntype = 0;
95module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
96
97MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
98int nouveau_tv_disable = 0;
99module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
100
101MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
102 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
103 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
104 "\t\tDefault: PAL\n"
105 "\t\t*NOTE* Ignored for cards with external TV encoders.");
106char *nouveau_tv_norm;
107module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
108
109MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
110 "\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n"
111 "\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n"
112 "\t\t0x100 vgaattr, 0x200 EVO (G80+)");
113int nouveau_reg_debug;
114module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
115
116MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
117char *nouveau_perflvl;
118module_param_named(perflvl, nouveau_perflvl, charp, 0400);
119
120MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
121int nouveau_perflvl_wr;
122module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
123
124MODULE_PARM_DESC(msi, "Enable MSI (default: off)");
125int nouveau_msi;
126module_param_named(msi, nouveau_msi, int, 0400);
127
128MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)");
129int nouveau_ctxfw;
130module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
131
132MODULE_PARM_DESC(mxmdcb, "Sanitise DCB table according to MXM-SIS");
133int nouveau_mxmdcb = 1;
134module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400);
135
136int nouveau_fbpercrtc;
137#if 0
138module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
139#endif
140
141static struct pci_device_id pciidlist[] = {
142 {
143 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
144 .class = PCI_BASE_CLASS_DISPLAY << 16,
145 .class_mask = 0xff << 16,
146 },
147 {
148 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
149 .class = PCI_BASE_CLASS_DISPLAY << 16,
150 .class_mask = 0xff << 16,
151 },
152 {}
153};
154
155MODULE_DEVICE_TABLE(pci, pciidlist);
156
157static struct drm_driver driver;
158
159static int __devinit
160nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
161{
162 return drm_get_pci_dev(pdev, ent, &driver);
163}
164
165static void
166nouveau_pci_remove(struct pci_dev *pdev)
167{
168 struct drm_device *dev = pci_get_drvdata(pdev);
169
170 drm_put_dev(dev);
171}
172
173int
174nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
175{
176 struct drm_device *dev = pci_get_drvdata(pdev);
177 struct drm_nouveau_private *dev_priv = dev->dev_private;
178 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
179 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
180 struct nouveau_channel *chan;
181 struct drm_crtc *crtc;
182 int ret, i, e;
183
184 if (pm_state.event == PM_EVENT_PRETHAW)
185 return 0;
186
187 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
188 return 0;
189
190 NV_INFO(dev, "Disabling display...\n");
191 nouveau_display_fini(dev);
192
193 NV_INFO(dev, "Disabling fbcon...\n");
194 nouveau_fbcon_set_suspend(dev, 1);
195
196 NV_INFO(dev, "Unpinning framebuffer(s)...\n");
197 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
198 struct nouveau_framebuffer *nouveau_fb;
199
200 nouveau_fb = nouveau_framebuffer(crtc->fb);
201 if (!nouveau_fb || !nouveau_fb->nvbo)
202 continue;
203
204 nouveau_bo_unpin(nouveau_fb->nvbo);
205 }
206
207 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
208 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
209
210 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
211 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
212 }
213
214 NV_INFO(dev, "Evicting buffers...\n");
215 ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
216
217 NV_INFO(dev, "Idling channels...\n");
218 for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
219 chan = dev_priv->channels.ptr[i];
220
221 if (chan && chan->pushbuf_bo)
222 nouveau_channel_idle(chan);
223 }
224
225 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
226 if (!dev_priv->eng[e])
227 continue;
228
229 ret = dev_priv->eng[e]->fini(dev, e, true);
230 if (ret) {
231 NV_ERROR(dev, "... engine %d failed: %d\n", e, ret);
232 goto out_abort;
233 }
234 }
235
236 ret = pinstmem->suspend(dev);
237 if (ret) {
238 NV_ERROR(dev, "... failed: %d\n", ret);
239 goto out_abort;
240 }
241
242 NV_INFO(dev, "Suspending GPU objects...\n");
243 ret = nouveau_gpuobj_suspend(dev);
244 if (ret) {
245 NV_ERROR(dev, "... failed: %d\n", ret);
246 pinstmem->resume(dev);
247 goto out_abort;
248 }
249
250 NV_INFO(dev, "And we're gone!\n");
251 pci_save_state(pdev);
252 if (pm_state.event == PM_EVENT_SUSPEND) {
253 pci_disable_device(pdev);
254 pci_set_power_state(pdev, PCI_D3hot);
255 }
256
257 return 0;
258
259out_abort:
260 NV_INFO(dev, "Re-enabling acceleration..\n");
261 for (e = e + 1; e < NVOBJ_ENGINE_NR; e++) {
262 if (dev_priv->eng[e])
263 dev_priv->eng[e]->init(dev, e);
264 }
265 return ret;
266}
267
268int
269nouveau_pci_resume(struct pci_dev *pdev)
270{
271 struct drm_device *dev = pci_get_drvdata(pdev);
272 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
273 struct drm_nouveau_private *dev_priv = dev->dev_private;
274 struct nouveau_engine *engine = &dev_priv->engine;
275 struct drm_crtc *crtc;
276 int ret, i;
277
278 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
279 return 0;
280
281 NV_INFO(dev, "We're back, enabling device...\n");
282 pci_set_power_state(pdev, PCI_D0);
283 pci_restore_state(pdev);
284 if (pci_enable_device(pdev))
285 return -1;
286 pci_set_master(dev->pdev);
287
288 /* Make sure the AGP controller is in a consistent state */
289 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
290 nouveau_mem_reset_agp(dev);
291
292 /* Make the CRTCs accessible */
293 engine->display.early_init(dev);
294
295 NV_INFO(dev, "POSTing device...\n");
296 ret = nouveau_run_vbios_init(dev);
297 if (ret)
298 return ret;
299
300 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
301 ret = nouveau_mem_init_agp(dev);
302 if (ret) {
303 NV_ERROR(dev, "error reinitialising AGP: %d\n", ret);
304 return ret;
305 }
306 }
307
308 NV_INFO(dev, "Restoring GPU objects...\n");
309 nouveau_gpuobj_resume(dev);
310
311 NV_INFO(dev, "Reinitialising engines...\n");
312 engine->instmem.resume(dev);
313 engine->mc.init(dev);
314 engine->timer.init(dev);
315 engine->fb.init(dev);
316 for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
317 if (dev_priv->eng[i])
318 dev_priv->eng[i]->init(dev, i);
319 }
320
321 nouveau_irq_postinstall(dev);
322
323 /* Re-write SKIPS, they'll have been lost over the suspend */
324 if (nouveau_vram_pushbuf) {
325 struct nouveau_channel *chan;
326 int j;
327
328 for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
329 chan = dev_priv->channels.ptr[i];
330 if (!chan || !chan->pushbuf_bo)
331 continue;
332
333 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
334 nouveau_bo_wr32(chan->pushbuf_bo, i, 0);
335 }
336 }
337
338 nouveau_pm_resume(dev);
339
340 NV_INFO(dev, "Restoring mode...\n");
341 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
342 struct nouveau_framebuffer *nouveau_fb;
343
344 nouveau_fb = nouveau_framebuffer(crtc->fb);
345 if (!nouveau_fb || !nouveau_fb->nvbo)
346 continue;
347
348 nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
349 }
350
351 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
352 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
353
354 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
355 if (!ret)
356 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
357 if (ret)
358 NV_ERROR(dev, "Could not pin/map cursor.\n");
359 }
360
361 nouveau_fbcon_set_suspend(dev, 0);
362 nouveau_fbcon_zfill_all(dev);
363
364 nouveau_display_init(dev);
365
366 /* Force CLUT to get re-loaded during modeset */
367 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
368 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
369
370 nv_crtc->lut.depth = 0;
371 }
372
373 drm_helper_resume_force_mode(dev);
374
375 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
376 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
377 u32 offset = nv_crtc->cursor.nvbo->bo.offset;
378
379 nv_crtc->cursor.set_offset(nv_crtc, offset);
380 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
381 nv_crtc->cursor_saved_y);
382 }
383
384 return 0;
385}
386
387static struct drm_ioctl_desc nouveau_ioctls[] = {
388 DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
389 DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
390 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH),
391 DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH),
392 DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
393 DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH),
394 DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
395 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
396 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
397 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
398 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
399 DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
400};
401
402static const struct file_operations nouveau_driver_fops = {
403 .owner = THIS_MODULE,
404 .open = drm_open,
405 .release = drm_release,
406 .unlocked_ioctl = drm_ioctl,
407 .mmap = nouveau_ttm_mmap,
408 .poll = drm_poll,
409 .fasync = drm_fasync,
410 .read = drm_read,
411#if defined(CONFIG_COMPAT)
412 .compat_ioctl = nouveau_compat_ioctl,
413#endif
414 .llseek = noop_llseek,
415};
416
417static struct drm_driver driver = {
418 .driver_features =
419 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
420 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
421 DRIVER_MODESET | DRIVER_PRIME,
422 .load = nouveau_load,
423 .firstopen = nouveau_firstopen,
424 .lastclose = nouveau_lastclose,
425 .unload = nouveau_unload,
426 .open = nouveau_open,
427 .preclose = nouveau_preclose,
428 .postclose = nouveau_postclose,
429#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
430 .debugfs_init = nouveau_debugfs_init,
431 .debugfs_cleanup = nouveau_debugfs_takedown,
432#endif
433 .irq_preinstall = nouveau_irq_preinstall,
434 .irq_postinstall = nouveau_irq_postinstall,
435 .irq_uninstall = nouveau_irq_uninstall,
436 .irq_handler = nouveau_irq_handler,
437 .get_vblank_counter = drm_vblank_count,
438 .enable_vblank = nouveau_vblank_enable,
439 .disable_vblank = nouveau_vblank_disable,
440 .ioctls = nouveau_ioctls,
441 .fops = &nouveau_driver_fops,
442
443 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
444 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
445 .gem_prime_export = nouveau_gem_prime_export,
446 .gem_prime_import = nouveau_gem_prime_import,
447
448 .gem_init_object = nouveau_gem_object_new,
449 .gem_free_object = nouveau_gem_object_del,
450 .gem_open_object = nouveau_gem_object_open,
451 .gem_close_object = nouveau_gem_object_close,
452
453 .dumb_create = nouveau_display_dumb_create,
454 .dumb_map_offset = nouveau_display_dumb_map_offset,
455 .dumb_destroy = nouveau_display_dumb_destroy,
456
457 .name = DRIVER_NAME,
458 .desc = DRIVER_DESC,
459#ifdef GIT_REVISION
460 .date = GIT_REVISION,
461#else
462 .date = DRIVER_DATE,
463#endif
464 .major = DRIVER_MAJOR,
465 .minor = DRIVER_MINOR,
466 .patchlevel = DRIVER_PATCHLEVEL,
467};
468
469static struct pci_driver nouveau_pci_driver = {
470 .name = DRIVER_NAME,
471 .id_table = pciidlist,
472 .probe = nouveau_pci_probe,
473 .remove = nouveau_pci_remove,
474 .suspend = nouveau_pci_suspend,
475 .resume = nouveau_pci_resume
476};
477
478static int __init nouveau_init(void)
479{
480 driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
481
482 if (nouveau_modeset == -1) {
483#ifdef CONFIG_VGA_CONSOLE
484 if (vgacon_text_force())
485 nouveau_modeset = 0;
486 else
487#endif
488 nouveau_modeset = 1;
489 }
490
491 if (!nouveau_modeset)
492 return 0;
493
494 nouveau_register_dsm_handler();
495 return drm_pci_init(&driver, &nouveau_pci_driver);
496}
497
498static void __exit nouveau_exit(void)
499{
500 if (!nouveau_modeset)
501 return;
502
503 drm_pci_exit(&driver, &nouveau_pci_driver);
504 nouveau_unregister_dsm_handler();
505}
506
507module_init(nouveau_init);
508module_exit(nouveau_exit);
509
510MODULE_AUTHOR(DRIVER_AUTHOR);
511MODULE_DESCRIPTION(DRIVER_DESC);
512MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
deleted file mode 100644
index 543c79bd958c..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ /dev/null
@@ -1,1655 +0,0 @@
1/*
2 * Copyright 2005 Stephane Marchesin.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef __NOUVEAU_DRV_H__
26#define __NOUVEAU_DRV_H__
27
28#define DRIVER_AUTHOR "Stephane Marchesin"
29#define DRIVER_EMAIL "nouveau@lists.freedesktop.org"
30
31#define DRIVER_NAME "nouveau"
32#define DRIVER_DESC "nVidia Riva/TNT/GeForce"
33#define DRIVER_DATE "20120316"
34
35#define DRIVER_MAJOR 1
36#define DRIVER_MINOR 0
37#define DRIVER_PATCHLEVEL 0
38
39#define NOUVEAU_FAMILY 0x0000FFFF
40#define NOUVEAU_FLAGS 0xFFFF0000
41
42#include <drm/ttm/ttm_bo_api.h>
43#include <drm/ttm/ttm_bo_driver.h>
44#include <drm/ttm/ttm_placement.h>
45#include <drm/ttm/ttm_memory.h>
46#include <drm/ttm/ttm_module.h>
47
48struct nouveau_fpriv {
49 spinlock_t lock;
50 struct list_head channels;
51 struct nouveau_vm *vm;
52};
53
54static inline struct nouveau_fpriv *
55nouveau_fpriv(struct drm_file *file_priv)
56{
57 return file_priv ? file_priv->driver_priv : NULL;
58}
59
60#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
61
62#include <drm/nouveau_drm.h>
63#include "nouveau_reg.h"
64#include "nouveau_bios.h"
65#include "nouveau_util.h"
66
67struct nouveau_grctx;
68struct nouveau_mem;
69#include "nouveau_vm.h"
70
71#define MAX_NUM_DCB_ENTRIES 16
72
73#define NOUVEAU_MAX_CHANNEL_NR 4096
74#define NOUVEAU_MAX_TILE_NR 15
75
76struct nouveau_mem {
77 struct drm_device *dev;
78
79 struct nouveau_vma bar_vma;
80 struct nouveau_vma vma[2];
81 u8 page_shift;
82
83 struct drm_mm_node *tag;
84 struct list_head regions;
85 dma_addr_t *pages;
86 u32 memtype;
87 u64 offset;
88 u64 size;
89 struct sg_table *sg;
90};
91
92struct nouveau_tile_reg {
93 bool used;
94 uint32_t addr;
95 uint32_t limit;
96 uint32_t pitch;
97 uint32_t zcomp;
98 struct drm_mm_node *tag_mem;
99 struct nouveau_fence *fence;
100};
101
102struct nouveau_bo {
103 struct ttm_buffer_object bo;
104 struct ttm_placement placement;
105 u32 valid_domains;
106 u32 placements[3];
107 u32 busy_placements[3];
108 struct ttm_bo_kmap_obj kmap;
109 struct list_head head;
110
111 /* protected by ttm_bo_reserve() */
112 struct drm_file *reserved_by;
113 struct list_head entry;
114 int pbbo_index;
115 bool validate_mapped;
116
117 struct list_head vma_list;
118 unsigned page_shift;
119
120 uint32_t tile_mode;
121 uint32_t tile_flags;
122 struct nouveau_tile_reg *tile;
123
124 struct drm_gem_object *gem;
125 int pin_refcnt;
126
127 struct ttm_bo_kmap_obj dma_buf_vmap;
128 int vmapping_count;
129};
130
131#define nouveau_bo_tile_layout(nvbo) \
132 ((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
133
134static inline struct nouveau_bo *
135nouveau_bo(struct ttm_buffer_object *bo)
136{
137 return container_of(bo, struct nouveau_bo, bo);
138}
139
140static inline struct nouveau_bo *
141nouveau_gem_object(struct drm_gem_object *gem)
142{
143 return gem ? gem->driver_private : NULL;
144}
145
146/* TODO: submit equivalent to TTM generic API upstream? */
147static inline void __iomem *
148nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
149{
150 bool is_iomem;
151 void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
152 &nvbo->kmap, &is_iomem);
153 WARN_ON_ONCE(ioptr && !is_iomem);
154 return ioptr;
155}
156
157enum nouveau_flags {
158 NV_NFORCE = 0x10000000,
159 NV_NFORCE2 = 0x20000000
160};
161
162#define NVOBJ_ENGINE_SW 0
163#define NVOBJ_ENGINE_GR 1
164#define NVOBJ_ENGINE_CRYPT 2
165#define NVOBJ_ENGINE_COPY0 3
166#define NVOBJ_ENGINE_COPY1 4
167#define NVOBJ_ENGINE_MPEG 5
168#define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG
169#define NVOBJ_ENGINE_BSP 6
170#define NVOBJ_ENGINE_VP 7
171#define NVOBJ_ENGINE_FIFO 14
172#define NVOBJ_ENGINE_FENCE 15
173#define NVOBJ_ENGINE_NR 16
174#define NVOBJ_ENGINE_DISPLAY (NVOBJ_ENGINE_NR + 0) /*XXX*/
175
176#define NVOBJ_FLAG_DONT_MAP (1 << 0)
177#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
178#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
179#define NVOBJ_FLAG_VM (1 << 3)
180#define NVOBJ_FLAG_VM_USER (1 << 4)
181
182#define NVOBJ_CINST_GLOBAL 0xdeadbeef
183
184struct nouveau_gpuobj {
185 struct drm_device *dev;
186 struct kref refcount;
187 struct list_head list;
188
189 void *node;
190 u32 *suspend;
191
192 uint32_t flags;
193
194 u32 size;
195 u32 pinst; /* PRAMIN BAR offset */
196 u32 cinst; /* Channel offset */
197 u64 vinst; /* VRAM address */
198 u64 linst; /* VM address */
199
200 uint32_t engine;
201 uint32_t class;
202
203 void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
204 void *priv;
205};
206
207struct nouveau_page_flip_state {
208 struct list_head head;
209 struct drm_pending_vblank_event *event;
210 int crtc, bpp, pitch, x, y;
211 uint64_t offset;
212};
213
214enum nouveau_channel_mutex_class {
215 NOUVEAU_UCHANNEL_MUTEX,
216 NOUVEAU_KCHANNEL_MUTEX
217};
218
219struct nouveau_channel {
220 struct drm_device *dev;
221 struct list_head list;
222 int id;
223
224 /* references to the channel data structure */
225 struct kref ref;
226 /* users of the hardware channel resources, the hardware
227 * context will be kicked off when it reaches zero. */
228 atomic_t users;
229 struct mutex mutex;
230
231 /* owner of this fifo */
232 struct drm_file *file_priv;
233 /* mapping of the fifo itself */
234 struct drm_local_map *map;
235
236 /* mapping of the regs controlling the fifo */
237 void __iomem *user;
238 uint32_t user_get;
239 uint32_t user_get_hi;
240 uint32_t user_put;
241
242 /* DMA push buffer */
243 struct nouveau_gpuobj *pushbuf;
244 struct nouveau_bo *pushbuf_bo;
245 struct nouveau_vma pushbuf_vma;
246 uint64_t pushbuf_base;
247
248 /* Notifier memory */
249 struct nouveau_bo *notifier_bo;
250 struct nouveau_vma notifier_vma;
251 struct drm_mm notifier_heap;
252
253 /* PFIFO context */
254 struct nouveau_gpuobj *ramfc;
255
256 /* Execution engine contexts */
257 void *engctx[NVOBJ_ENGINE_NR];
258
259 /* NV50 VM */
260 struct nouveau_vm *vm;
261 struct nouveau_gpuobj *vm_pd;
262
263 /* Objects */
264 struct nouveau_gpuobj *ramin; /* Private instmem */
265 struct drm_mm ramin_heap; /* Private PRAMIN heap */
266 struct nouveau_ramht *ramht; /* Hash table */
267
268 /* GPU object info for stuff used in-kernel (mm_enabled) */
269 uint32_t m2mf_ntfy;
270 uint32_t vram_handle;
271 uint32_t gart_handle;
272 bool accel_done;
273
274 /* Push buffer state (only for drm's channel on !mm_enabled) */
275 struct {
276 int max;
277 int free;
278 int cur;
279 int put;
280 /* access via pushbuf_bo */
281
282 int ib_base;
283 int ib_max;
284 int ib_free;
285 int ib_put;
286 } dma;
287
288 struct {
289 bool active;
290 char name[32];
291 struct drm_info_list info;
292 } debugfs;
293};
294
295struct nouveau_exec_engine {
296 void (*destroy)(struct drm_device *, int engine);
297 int (*init)(struct drm_device *, int engine);
298 int (*fini)(struct drm_device *, int engine, bool suspend);
299 int (*context_new)(struct nouveau_channel *, int engine);
300 void (*context_del)(struct nouveau_channel *, int engine);
301 int (*object_new)(struct nouveau_channel *, int engine,
302 u32 handle, u16 class);
303 void (*set_tile_region)(struct drm_device *dev, int i);
304 void (*tlb_flush)(struct drm_device *, int engine);
305};
306
307struct nouveau_instmem_engine {
308 void *priv;
309
310 int (*init)(struct drm_device *dev);
311 void (*takedown)(struct drm_device *dev);
312 int (*suspend)(struct drm_device *dev);
313 void (*resume)(struct drm_device *dev);
314
315 int (*get)(struct nouveau_gpuobj *, struct nouveau_channel *,
316 u32 size, u32 align);
317 void (*put)(struct nouveau_gpuobj *);
318 int (*map)(struct nouveau_gpuobj *);
319 void (*unmap)(struct nouveau_gpuobj *);
320
321 void (*flush)(struct drm_device *);
322};
323
324struct nouveau_mc_engine {
325 int (*init)(struct drm_device *dev);
326 void (*takedown)(struct drm_device *dev);
327};
328
329struct nouveau_timer_engine {
330 int (*init)(struct drm_device *dev);
331 void (*takedown)(struct drm_device *dev);
332 uint64_t (*read)(struct drm_device *dev);
333};
334
335struct nouveau_fb_engine {
336 int num_tiles;
337 struct drm_mm tag_heap;
338 void *priv;
339
340 int (*init)(struct drm_device *dev);
341 void (*takedown)(struct drm_device *dev);
342
343 void (*init_tile_region)(struct drm_device *dev, int i,
344 uint32_t addr, uint32_t size,
345 uint32_t pitch, uint32_t flags);
346 void (*set_tile_region)(struct drm_device *dev, int i);
347 void (*free_tile_region)(struct drm_device *dev, int i);
348};
349
350struct nouveau_display_engine {
351 void *priv;
352 int (*early_init)(struct drm_device *);
353 void (*late_takedown)(struct drm_device *);
354 int (*create)(struct drm_device *);
355 void (*destroy)(struct drm_device *);
356 int (*init)(struct drm_device *);
357 void (*fini)(struct drm_device *);
358
359 struct drm_property *dithering_mode;
360 struct drm_property *dithering_depth;
361 struct drm_property *underscan_property;
362 struct drm_property *underscan_hborder_property;
363 struct drm_property *underscan_vborder_property;
364 /* not really hue and saturation: */
365 struct drm_property *vibrant_hue_property;
366 struct drm_property *color_vibrance_property;
367};
368
369struct nouveau_gpio_engine {
370 spinlock_t lock;
371 struct list_head isr;
372 int (*init)(struct drm_device *);
373 void (*fini)(struct drm_device *);
374 int (*drive)(struct drm_device *, int line, int dir, int out);
375 int (*sense)(struct drm_device *, int line);
376 void (*irq_enable)(struct drm_device *, int line, bool);
377};
378
379struct nouveau_pm_voltage_level {
380 u32 voltage; /* microvolts */
381 u8 vid;
382};
383
384struct nouveau_pm_voltage {
385 bool supported;
386 u8 version;
387 u8 vid_mask;
388
389 struct nouveau_pm_voltage_level *level;
390 int nr_level;
391};
392
393/* Exclusive upper limits */
394#define NV_MEM_CL_DDR2_MAX 8
395#define NV_MEM_WR_DDR2_MAX 9
396#define NV_MEM_CL_DDR3_MAX 17
397#define NV_MEM_WR_DDR3_MAX 17
398#define NV_MEM_CL_GDDR3_MAX 16
399#define NV_MEM_WR_GDDR3_MAX 18
400#define NV_MEM_CL_GDDR5_MAX 21
401#define NV_MEM_WR_GDDR5_MAX 20
402
403struct nouveau_pm_memtiming {
404 int id;
405
406 u32 reg[9];
407 u32 mr[4];
408
409 u8 tCWL;
410
411 u8 odt;
412 u8 drive_strength;
413};
414
415struct nouveau_pm_tbl_header {
416 u8 version;
417 u8 header_len;
418 u8 entry_cnt;
419 u8 entry_len;
420};
421
422struct nouveau_pm_tbl_entry {
423 u8 tWR;
424 u8 tWTR;
425 u8 tCL;
426 u8 tRC;
427 u8 empty_4;
428 u8 tRFC; /* Byte 5 */
429 u8 empty_6;
430 u8 tRAS; /* Byte 7 */
431 u8 empty_8;
432 u8 tRP; /* Byte 9 */
433 u8 tRCDRD;
434 u8 tRCDWR;
435 u8 tRRD;
436 u8 tUNK_13;
437 u8 RAM_FT1; /* 14, a bitmask of random RAM features */
438 u8 empty_15;
439 u8 tUNK_16;
440 u8 empty_17;
441 u8 tUNK_18;
442 u8 tCWL;
443 u8 tUNK_20, tUNK_21;
444};
445
446struct nouveau_pm_profile;
447struct nouveau_pm_profile_func {
448 void (*destroy)(struct nouveau_pm_profile *);
449 void (*init)(struct nouveau_pm_profile *);
450 void (*fini)(struct nouveau_pm_profile *);
451 struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
452};
453
454struct nouveau_pm_profile {
455 const struct nouveau_pm_profile_func *func;
456 struct list_head head;
457 char name[8];
458};
459
460#define NOUVEAU_PM_MAX_LEVEL 8
461struct nouveau_pm_level {
462 struct nouveau_pm_profile profile;
463 struct device_attribute dev_attr;
464 char name[32];
465 int id;
466
467 struct nouveau_pm_memtiming timing;
468 u32 memory;
469 u16 memscript;
470
471 u32 core;
472 u32 shader;
473 u32 rop;
474 u32 copy;
475 u32 daemon;
476 u32 vdec;
477 u32 dom6;
478 u32 unka0; /* nva3:nvc0 */
479 u32 hub01; /* nvc0- */
480 u32 hub06; /* nvc0- */
481 u32 hub07; /* nvc0- */
482
483 u32 volt_min; /* microvolts */
484 u32 volt_max;
485 u8 fanspeed;
486};
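/*
 * Illustrative sketch (hypothetical names): a minimal profile implementation.
 * Each nouveau_pm_level embeds its nouveau_pm_profile, so ->select() can
 * recover the level with container_of(); the remaining hooks are assumed
 * optional here.
 */
static struct nouveau_pm_level *
example_profile_select(struct nouveau_pm_profile *profile)
{
        return container_of(profile, struct nouveau_pm_level, profile);
}

static const struct nouveau_pm_profile_func example_profile_func = {
        .select = example_profile_select,
};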
487
488struct nouveau_pm_temp_sensor_constants {
489 u16 offset_constant;
490 s16 offset_mult;
491 s16 offset_div;
492 s16 slope_mult;
493 s16 slope_div;
494};
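/*
 * Illustrative sketch (hypothetical helper): how calibration constants like
 * these are typically applied to a raw sensor reading.  The exact ordering
 * and rounding are chipset-specific; treat this as an assumption rather than
 * the driver's canonical formula.
 */
static inline int example_raw_to_celsius(int raw,
                const struct nouveau_pm_temp_sensor_constants *c)
{
        int temp = raw * c->slope_mult / c->slope_div;
        return temp + c->offset_mult / c->offset_div + c->offset_constant;
}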
495
496struct nouveau_pm_threshold_temp {
497 s16 critical;
498 s16 down_clock;
499 s16 fan_boost;
500};
501
502struct nouveau_pm_fan {
503 u32 percent;
504 u32 min_duty;
505 u32 max_duty;
506 u32 pwm_freq;
507 u32 pwm_divisor;
508};
509
510struct nouveau_pm_engine {
511 struct nouveau_pm_voltage voltage;
512 struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
513 int nr_perflvl;
514 struct nouveau_pm_temp_sensor_constants sensor_constants;
515 struct nouveau_pm_threshold_temp threshold_temp;
516 struct nouveau_pm_fan fan;
517
518 struct nouveau_pm_profile *profile_ac;
519 struct nouveau_pm_profile *profile_dc;
520 struct nouveau_pm_profile *profile;
521 struct list_head profiles;
522
523 struct nouveau_pm_level boot;
524 struct nouveau_pm_level *cur;
525
526 struct device *hwmon;
527 struct notifier_block acpi_nb;
528
529 int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
530 void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
531 int (*clocks_set)(struct drm_device *, void *);
532
533 int (*voltage_get)(struct drm_device *);
534 int (*voltage_set)(struct drm_device *, int voltage);
535 int (*pwm_get)(struct drm_device *, int line, u32*, u32*);
536 int (*pwm_set)(struct drm_device *, int line, u32, u32);
537 int (*temp_get)(struct drm_device *);
538};
539
540struct nouveau_vram_engine {
541 struct nouveau_mm mm;
542
543 int (*init)(struct drm_device *);
544 void (*takedown)(struct drm_device *dev);
545 int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
546 u32 type, struct nouveau_mem **);
547 void (*put)(struct drm_device *, struct nouveau_mem **);
548
549 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
550};
551
552struct nouveau_engine {
553 struct nouveau_instmem_engine instmem;
554 struct nouveau_mc_engine mc;
555 struct nouveau_timer_engine timer;
556 struct nouveau_fb_engine fb;
557 struct nouveau_display_engine display;
558 struct nouveau_gpio_engine gpio;
559 struct nouveau_pm_engine pm;
560 struct nouveau_vram_engine vram;
561};
562
563struct nouveau_pll_vals {
564 union {
565 struct {
566#ifdef __BIG_ENDIAN
567 uint8_t N1, M1, N2, M2;
568#else
569 uint8_t M1, N1, M2, N2;
570#endif
571 };
572 struct {
573 uint16_t NM1, NM2;
574 } __attribute__((packed));
575 };
576 int log2P;
577
578 int refclk;
579};
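/*
 * Illustrative sketch (hypothetical helper): turning nouveau_pll_vals into an
 * output clock.  out = ref * (N1/M1) * (N2/M2), post-divided by 2^log2P; for
 * single-stage PLLs N2 = M2 = 1 by convention (assumption).
 */
static inline int example_pllvals_to_clk(const struct nouveau_pll_vals *pv)
{
        return (pv->refclk * pv->N1 * pv->N2 / pv->M1 / pv->M2) >> pv->log2P;
}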
580
581enum nv04_fp_display_regs {
582 FP_DISPLAY_END,
583 FP_TOTAL,
584 FP_CRTC,
585 FP_SYNC_START,
586 FP_SYNC_END,
587 FP_VALID_START,
588 FP_VALID_END
589};
590
591struct nv04_crtc_reg {
592 unsigned char MiscOutReg;
593 uint8_t CRTC[0xa0];
594 uint8_t CR58[0x10];
595 uint8_t Sequencer[5];
596 uint8_t Graphics[9];
597 uint8_t Attribute[21];
598 unsigned char DAC[768];
599
600 /* PCRTC regs */
601 uint32_t fb_start;
602 uint32_t crtc_cfg;
603 uint32_t cursor_cfg;
604 uint32_t gpio_ext;
605 uint32_t crtc_830;
606 uint32_t crtc_834;
607 uint32_t crtc_850;
608 uint32_t crtc_eng_ctrl;
609
610 /* PRAMDAC regs */
611 uint32_t nv10_cursync;
612 struct nouveau_pll_vals pllvals;
613 uint32_t ramdac_gen_ctrl;
614 uint32_t ramdac_630;
615 uint32_t ramdac_634;
616 uint32_t tv_setup;
617 uint32_t tv_vtotal;
618 uint32_t tv_vskew;
619 uint32_t tv_vsync_delay;
620 uint32_t tv_htotal;
621 uint32_t tv_hskew;
622 uint32_t tv_hsync_delay;
623 uint32_t tv_hsync_delay2;
624 uint32_t fp_horiz_regs[7];
625 uint32_t fp_vert_regs[7];
626 uint32_t dither;
627 uint32_t fp_control;
628 uint32_t dither_regs[6];
629 uint32_t fp_debug_0;
630 uint32_t fp_debug_1;
631 uint32_t fp_debug_2;
632 uint32_t fp_margin_color;
633 uint32_t ramdac_8c0;
634 uint32_t ramdac_a20;
635 uint32_t ramdac_a24;
636 uint32_t ramdac_a34;
637 uint32_t ctv_regs[38];
638};
639
640struct nv04_output_reg {
641 uint32_t output;
642 int head;
643};
644
645struct nv04_mode_state {
646 struct nv04_crtc_reg crtc_reg[2];
647 uint32_t pllsel;
648 uint32_t sel_clk;
649};
650
651enum nouveau_card_type {
652 NV_04 = 0x04,
653 NV_10 = 0x10,
654 NV_20 = 0x20,
655 NV_30 = 0x30,
656 NV_40 = 0x40,
657 NV_50 = 0x50,
658 NV_C0 = 0xc0,
659 NV_D0 = 0xd0,
660 NV_E0 = 0xe0,
661};
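/*
 * Illustrative sketch: card_type holds the first member of each hardware
 * family, so generation checks are range comparisons rather than equality
 * tests (the same pattern the fbcon code later in this patch uses with
 * NV_50/NV_C0).  Helper name is hypothetical.
 */
static inline bool example_is_fermi_or_newer(enum nouveau_card_type type)
{
        return type >= NV_C0;
}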
662
663struct drm_nouveau_private {
664 struct drm_device *dev;
665 bool noaccel;
666
667 /* the card type, takes NV_* as values */
668 enum nouveau_card_type card_type;
669 /* exact chipset, derived from NV_PMC_BOOT_0 */
670 int chipset;
671 int flags;
672 u32 crystal;
673
674 void __iomem *mmio;
675
676 spinlock_t ramin_lock;
677 void __iomem *ramin;
678 u32 ramin_size;
679 u32 ramin_base;
680 bool ramin_available;
681 struct drm_mm ramin_heap;
682 struct nouveau_exec_engine *eng[NVOBJ_ENGINE_NR];
683 struct list_head gpuobj_list;
684 struct list_head classes;
685
686 struct nouveau_bo *vga_ram;
687
688 /* interrupt handling */
689 void (*irq_handler[32])(struct drm_device *);
690 bool msi_enabled;
691
692 struct {
693 struct drm_global_reference mem_global_ref;
694 struct ttm_bo_global_ref bo_global_ref;
695 struct ttm_bo_device bdev;
696 atomic_t validate_sequence;
697 int (*move)(struct nouveau_channel *,
698 struct ttm_buffer_object *,
699 struct ttm_mem_reg *, struct ttm_mem_reg *);
700 } ttm;
701
702 struct {
703 spinlock_t lock;
704 struct drm_mm heap;
705 struct nouveau_bo *bo;
706 } fence;
707
708 struct {
709 spinlock_t lock;
710 struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
711 } channels;
712
713 struct nouveau_engine engine;
714 struct nouveau_channel *channel;
715
716 /* For PFIFO and PGRAPH. */
717 spinlock_t context_switch_lock;
718
719 /* VM/PRAMIN flush, legacy PRAMIN aperture */
720 spinlock_t vm_lock;
721
722 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
723 struct nouveau_ramht *ramht;
724 struct nouveau_gpuobj *ramfc;
725 struct nouveau_gpuobj *ramro;
726
727 uint32_t ramin_rsvd_vram;
728
729 struct {
730 enum {
731 NOUVEAU_GART_NONE = 0,
732 NOUVEAU_GART_AGP, /* AGP */
733 NOUVEAU_GART_PDMA, /* paged dma object */
734 NOUVEAU_GART_HW /* on-chip gart/vm */
735 } type;
736 uint64_t aper_base;
737 uint64_t aper_size;
738 uint64_t aper_free;
739
740 struct ttm_backend_func *func;
741
742 struct {
743 struct page *page;
744 dma_addr_t addr;
745 } dummy;
746
747 struct nouveau_gpuobj *sg_ctxdma;
748 } gart_info;
749
750 /* nv10-nv40 tiling regions */
751 struct {
752 struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
753 spinlock_t lock;
754 } tile;
755
756 /* VRAM/fb configuration */
757 enum {
758 NV_MEM_TYPE_UNKNOWN = 0,
759 NV_MEM_TYPE_STOLEN,
760 NV_MEM_TYPE_SGRAM,
761 NV_MEM_TYPE_SDRAM,
762 NV_MEM_TYPE_DDR1,
763 NV_MEM_TYPE_DDR2,
764 NV_MEM_TYPE_DDR3,
765 NV_MEM_TYPE_GDDR2,
766 NV_MEM_TYPE_GDDR3,
767 NV_MEM_TYPE_GDDR4,
768 NV_MEM_TYPE_GDDR5
769 } vram_type;
770 uint64_t vram_size;
771 uint64_t vram_sys_base;
772 bool vram_rank_B;
773
774 uint64_t fb_available_size;
775 uint64_t fb_mappable_pages;
776 uint64_t fb_aper_free;
777 int fb_mtrr;
778
779 /* BAR control (NV50-) */
780 struct nouveau_vm *bar1_vm;
781 struct nouveau_vm *bar3_vm;
782
783 /* G8x/G9x virtual address space */
784 struct nouveau_vm *chan_vm;
785
786 struct nvbios vbios;
787 u8 *mxms;
788 struct list_head i2c_ports;
789
790 struct nv04_mode_state mode_reg;
791 struct nv04_mode_state saved_reg;
792 uint32_t saved_vga_font[4][16384];
793 uint32_t crtc_owner;
794 uint32_t dac_users[4];
795
796 struct backlight_device *backlight;
797
798 struct {
799 struct dentry *channel_root;
800 } debugfs;
801
802 struct nouveau_fbdev *nfbdev;
803 struct apertures_struct *apertures;
804};
805
806static inline struct drm_nouveau_private *
807nouveau_private(struct drm_device *dev)
808{
809 return dev->dev_private;
810}
811
812static inline struct drm_nouveau_private *
813nouveau_bdev(struct ttm_bo_device *bd)
814{
815 return container_of(bd, struct drm_nouveau_private, ttm.bdev);
816}
817
818static inline int
819nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
820{
821 struct nouveau_bo *prev;
822
823 if (!pnvbo)
824 return -EINVAL;
825 prev = *pnvbo;
826
827 *pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
828 if (prev) {
829 struct ttm_buffer_object *bo = &prev->bo;
830
831 ttm_bo_unref(&bo);
832 }
833
834 return 0;
835}
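/*
 * Illustrative sketch: nouveau_bo_ref() is a "ref new, unref old" helper, so
 * one call installs a reference and a later NULL call releases it.
 * Hypothetical usage:
 */
static inline void example_bo_slot_usage(struct nouveau_bo *nvbo,
                                         struct nouveau_bo **slot)
{
        nouveau_bo_ref(nvbo, slot);     /* *slot now holds a reference */
        nouveau_bo_ref(NULL, slot);     /* reference dropped, *slot = NULL */
}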
836
837/* nouveau_drv.c */
838extern int nouveau_modeset;
839extern int nouveau_agpmode;
840extern int nouveau_duallink;
841extern int nouveau_uscript_lvds;
842extern int nouveau_uscript_tmds;
843extern int nouveau_vram_pushbuf;
844extern int nouveau_vram_notify;
845extern char *nouveau_vram_type;
846extern int nouveau_fbpercrtc;
847extern int nouveau_tv_disable;
848extern char *nouveau_tv_norm;
849extern int nouveau_reg_debug;
850extern char *nouveau_vbios;
851extern int nouveau_ignorelid;
852extern int nouveau_nofbaccel;
853extern int nouveau_noaccel;
854extern int nouveau_force_post;
855extern int nouveau_override_conntype;
856extern char *nouveau_perflvl;
857extern int nouveau_perflvl_wr;
858extern int nouveau_msi;
859extern int nouveau_ctxfw;
860extern int nouveau_mxmdcb;
861
862extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
863extern int nouveau_pci_resume(struct pci_dev *pdev);
864
865/* nouveau_state.c */
866extern int nouveau_open(struct drm_device *, struct drm_file *);
867extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
868extern void nouveau_postclose(struct drm_device *, struct drm_file *);
869extern int nouveau_load(struct drm_device *, unsigned long flags);
870extern int nouveau_firstopen(struct drm_device *);
871extern void nouveau_lastclose(struct drm_device *);
872extern int nouveau_unload(struct drm_device *);
873extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
874 uint32_t reg, uint32_t mask, uint32_t val);
875extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
876 uint32_t reg, uint32_t mask, uint32_t val);
877extern bool nouveau_wait_cb(struct drm_device *, u64 timeout,
878 bool (*cond)(void *), void *);
879extern bool nouveau_wait_for_idle(struct drm_device *);
880extern int nouveau_card_init(struct drm_device *);
881
882/* nouveau_mem.c */
883extern int nouveau_mem_vram_init(struct drm_device *);
884extern void nouveau_mem_vram_fini(struct drm_device *);
885extern int nouveau_mem_gart_init(struct drm_device *);
886extern void nouveau_mem_gart_fini(struct drm_device *);
887extern int nouveau_mem_init_agp(struct drm_device *);
888extern int nouveau_mem_reset_agp(struct drm_device *);
889extern void nouveau_mem_close(struct drm_device *);
890extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
891extern int nouveau_mem_timing_calc(struct drm_device *, u32 freq,
892 struct nouveau_pm_memtiming *);
893extern void nouveau_mem_timing_read(struct drm_device *,
894 struct nouveau_pm_memtiming *);
895extern int nouveau_mem_vbios_type(struct drm_device *);
896extern struct nouveau_tile_reg *nv10_mem_set_tiling(
897 struct drm_device *dev, uint32_t addr, uint32_t size,
898 uint32_t pitch, uint32_t flags);
899extern void nv10_mem_put_tile_region(struct drm_device *dev,
900 struct nouveau_tile_reg *tile,
901 struct nouveau_fence *fence);
902extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
903extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
904
905/* nouveau_notifier.c */
906extern int nouveau_notifier_init_channel(struct nouveau_channel *);
907extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
908extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
909 int count, uint32_t start, uint32_t end,
910 uint32_t *offset);
911
912/* nouveau_channel.c */
913extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
914extern int nouveau_channel_alloc(struct drm_device *dev,
915 struct nouveau_channel **chan,
916 struct drm_file *file_priv,
917 uint32_t fb_ctxdma, uint32_t tt_ctxdma);
918extern struct nouveau_channel *
919nouveau_channel_get_unlocked(struct nouveau_channel *);
920extern struct nouveau_channel *
921nouveau_channel_get(struct drm_file *, int id);
922extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
923extern void nouveau_channel_put(struct nouveau_channel **);
924extern void nouveau_channel_ref(struct nouveau_channel *chan,
925 struct nouveau_channel **pchan);
926extern int nouveau_channel_idle(struct nouveau_channel *chan);
927
928/* nouveau_gpuobj.c */
929#define NVOBJ_ENGINE_ADD(d, e, p) do { \
930 struct drm_nouveau_private *dev_priv = (d)->dev_private; \
931 dev_priv->eng[NVOBJ_ENGINE_##e] = (p); \
932} while (0)
933
934#define NVOBJ_ENGINE_DEL(d, e) do { \
935 struct drm_nouveau_private *dev_priv = (d)->dev_private; \
936 dev_priv->eng[NVOBJ_ENGINE_##e] = NULL; \
937} while (0)
938
939#define NVOBJ_CLASS(d, c, e) do { \
940 int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \
941 if (ret) \
942 return ret; \
943} while (0)
944
945#define NVOBJ_MTHD(d, c, m, e) do { \
946 int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \
947 if (ret) \
948 return ret; \
949} while (0)
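/*
 * Illustrative sketch: the NVOBJ_* macros are written for an engine's
 * create() function, where their hidden "return ret" on failure is legal.
 * The class/method numbers below correspond to the NV_SW and NV_SW_PAGE_FLIP
 * defines near the end of this header, and the SW engine slot is assumed to
 * be the NVOBJ_ENGINE_SW value defined earlier in the file.
 */
static int example_sw_engine_create(struct drm_device *dev)
{
        NVOBJ_CLASS(dev, 0x506e, SW);
        NVOBJ_MTHD(dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
        return 0;
}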
950
951extern int nouveau_gpuobj_early_init(struct drm_device *);
952extern int nouveau_gpuobj_init(struct drm_device *);
953extern void nouveau_gpuobj_takedown(struct drm_device *);
954extern int nouveau_gpuobj_suspend(struct drm_device *dev);
955extern void nouveau_gpuobj_resume(struct drm_device *dev);
956extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
957extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
958 int (*exec)(struct nouveau_channel *,
959 u32 class, u32 mthd, u32 data));
960extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
961extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
962extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
963 uint32_t vram_h, uint32_t tt_h);
964extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
965extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
966 uint32_t size, int align, uint32_t flags,
967 struct nouveau_gpuobj **);
968extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *,
969 struct nouveau_gpuobj **);
970extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
971 u32 size, u32 flags,
972 struct nouveau_gpuobj **);
973extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
974 uint64_t offset, uint64_t size, int access,
975 int target, struct nouveau_gpuobj **);
976extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class);
977extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
978 u64 size, int target, int access, u32 type,
979 u32 comp, struct nouveau_gpuobj **pobj);
980extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
981 int class, u64 base, u64 size, int target,
982 int access, u32 type, u32 comp);
983
984/* nouveau_irq.c */
985extern int nouveau_irq_init(struct drm_device *);
986extern void nouveau_irq_fini(struct drm_device *);
987extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
988extern void nouveau_irq_register(struct drm_device *, int status_bit,
989 void (*)(struct drm_device *));
990extern void nouveau_irq_unregister(struct drm_device *, int status_bit);
991extern void nouveau_irq_preinstall(struct drm_device *);
992extern int nouveau_irq_postinstall(struct drm_device *);
993extern void nouveau_irq_uninstall(struct drm_device *);
994
995/* nouveau_sgdma.c */
996extern int nouveau_sgdma_init(struct drm_device *);
997extern void nouveau_sgdma_takedown(struct drm_device *);
998extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
999 uint32_t offset);
1000extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
1001 unsigned long size,
1002 uint32_t page_flags,
1003 struct page *dummy_read_page);
1004
1005/* nouveau_debugfs.c */
1006#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
1007extern int nouveau_debugfs_init(struct drm_minor *);
1008extern void nouveau_debugfs_takedown(struct drm_minor *);
1009extern int nouveau_debugfs_channel_init(struct nouveau_channel *);
1010extern void nouveau_debugfs_channel_fini(struct nouveau_channel *);
1011#else
1012static inline int
1013nouveau_debugfs_init(struct drm_minor *minor)
1014{
1015 return 0;
1016}
1017
1018static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
1019{
1020}
1021
1022static inline int
1023nouveau_debugfs_channel_init(struct nouveau_channel *chan)
1024{
1025 return 0;
1026}
1027
1028static inline void
1029nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
1030{
1031}
1032#endif
1033
1034/* nouveau_dma.c */
1035extern void nouveau_dma_init(struct nouveau_channel *);
1036extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
1037
1038/* nouveau_acpi.c */
1039#define ROM_BIOS_PAGE 4096
1040#if defined(CONFIG_ACPI)
1041void nouveau_register_dsm_handler(void);
1042void nouveau_unregister_dsm_handler(void);
1043void nouveau_switcheroo_optimus_dsm(void);
1044int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
1045bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
1046int nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
1047#else
1048static inline void nouveau_register_dsm_handler(void) {}
1049static inline void nouveau_unregister_dsm_handler(void) {}
1050static inline void nouveau_switcheroo_optimus_dsm(void) {}
1051static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
1052static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
1053static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return -EINVAL; }
1054#endif
1055
1056/* nouveau_backlight.c */
1057#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
1058extern int nouveau_backlight_init(struct drm_device *);
1059extern void nouveau_backlight_exit(struct drm_device *);
1060#else
1061static inline int nouveau_backlight_init(struct drm_device *dev)
1062{
1063 return 0;
1064}
1065
1066static inline void nouveau_backlight_exit(struct drm_device *dev) { }
1067#endif
1068
1069/* nouveau_bios.c */
1070extern int nouveau_bios_init(struct drm_device *);
1071extern void nouveau_bios_takedown(struct drm_device *dev);
1072extern int nouveau_run_vbios_init(struct drm_device *);
1073extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
1074 struct dcb_entry *, int crtc);
1075extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table);
1076extern struct dcb_connector_table_entry *
1077nouveau_bios_connector_entry(struct drm_device *, int index);
1078extern u32 get_pll_register(struct drm_device *, enum pll_types);
1079extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
1080 struct pll_lims *);
1081extern int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
1082 struct dcb_entry *, int crtc);
1083extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
1084extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
1085extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
1086 bool *dl, bool *if_is_24bit);
1087extern int run_tmds_table(struct drm_device *, struct dcb_entry *,
1088 int head, int pxclk);
1089extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
1090 enum LVDS_script, int pxclk);
1091bool bios_encoder_match(struct dcb_entry *, u32 hash);
1092
1093/* nouveau_mxm.c */
1094int nouveau_mxm_init(struct drm_device *dev);
1095void nouveau_mxm_fini(struct drm_device *dev);
1096
1097/* nouveau_ttm.c */
1098int nouveau_ttm_global_init(struct drm_nouveau_private *);
1099void nouveau_ttm_global_release(struct drm_nouveau_private *);
1100int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
1101
1102/* nouveau_hdmi.c */
1103void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
1104
1105/* nv04_fb.c */
1106extern int nv04_fb_vram_init(struct drm_device *);
1107extern int nv04_fb_init(struct drm_device *);
1108extern void nv04_fb_takedown(struct drm_device *);
1109
1110/* nv10_fb.c */
1111extern int nv10_fb_vram_init(struct drm_device *dev);
1112extern int nv1a_fb_vram_init(struct drm_device *dev);
1113extern int nv10_fb_init(struct drm_device *);
1114extern void nv10_fb_takedown(struct drm_device *);
1115extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
1116 uint32_t addr, uint32_t size,
1117 uint32_t pitch, uint32_t flags);
1118extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
1119extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
1120
1121/* nv20_fb.c */
1122extern int nv20_fb_vram_init(struct drm_device *dev);
1123extern int nv20_fb_init(struct drm_device *);
1124extern void nv20_fb_takedown(struct drm_device *);
1125extern void nv20_fb_init_tile_region(struct drm_device *dev, int i,
1126 uint32_t addr, uint32_t size,
1127 uint32_t pitch, uint32_t flags);
1128extern void nv20_fb_set_tile_region(struct drm_device *dev, int i);
1129extern void nv20_fb_free_tile_region(struct drm_device *dev, int i);
1130
1131/* nv30_fb.c */
1132extern int nv30_fb_init(struct drm_device *);
1133extern void nv30_fb_takedown(struct drm_device *);
1134extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
1135 uint32_t addr, uint32_t size,
1136 uint32_t pitch, uint32_t flags);
1137extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
1138
1139/* nv40_fb.c */
1140extern int nv40_fb_vram_init(struct drm_device *dev);
1141extern int nv40_fb_init(struct drm_device *);
1142extern void nv40_fb_takedown(struct drm_device *);
1143extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
1144
1145/* nv50_fb.c */
1146extern int nv50_fb_init(struct drm_device *);
1147extern void nv50_fb_takedown(struct drm_device *);
1148extern void nv50_fb_vm_trap(struct drm_device *, int display);
1149
1150/* nvc0_fb.c */
1151extern int nvc0_fb_init(struct drm_device *);
1152extern void nvc0_fb_takedown(struct drm_device *);
1153
1154/* nv04_graph.c */
1155extern int nv04_graph_create(struct drm_device *);
1156extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
1157extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
1158 u32 class, u32 mthd, u32 data);
1159extern struct nouveau_bitfield nv04_graph_nsource[];
1160
1161/* nv10_graph.c */
1162extern int nv10_graph_create(struct drm_device *);
1163extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
1164extern struct nouveau_bitfield nv10_graph_intr[];
1165extern struct nouveau_bitfield nv10_graph_nstatus[];
1166
1167/* nv20_graph.c */
1168extern int nv20_graph_create(struct drm_device *);
1169
1170/* nv40_graph.c */
1171extern int nv40_graph_create(struct drm_device *);
1172extern void nv40_grctx_init(struct drm_device *, u32 *size);
1173extern void nv40_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
1174
1175/* nv50_graph.c */
1176extern int nv50_graph_create(struct drm_device *);
1177extern struct nouveau_enum nv50_data_error_names[];
1178extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst);
1179extern int nv50_grctx_init(struct drm_device *, u32 *, u32, u32 *, u32 *);
1180extern void nv50_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
1181
1182/* nvc0_graph.c */
1183extern int nvc0_graph_create(struct drm_device *);
1184extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
1185
1186/* nve0_graph.c */
1187extern int nve0_graph_create(struct drm_device *);
1188
1189/* nv84_crypt.c */
1190extern int nv84_crypt_create(struct drm_device *);
1191
1192/* nv98_crypt.c */
1193extern int nv98_crypt_create(struct drm_device *dev);
1194
1195/* nva3_copy.c */
1196extern int nva3_copy_create(struct drm_device *dev);
1197
1198/* nvc0_copy.c */
1199extern int nvc0_copy_create(struct drm_device *dev, int engine);
1200
1201/* nv31_mpeg.c */
1202extern int nv31_mpeg_create(struct drm_device *dev);
1203
1204/* nv50_mpeg.c */
1205extern int nv50_mpeg_create(struct drm_device *dev);
1206
1207/* nv84_bsp.c */
1208/* nv98_bsp.c */
1209extern int nv84_bsp_create(struct drm_device *dev);
1210
1211/* nv84_vp.c */
1212/* nv98_vp.c */
1213extern int nv84_vp_create(struct drm_device *dev);
1214
1215/* nv98_ppp.c */
1216extern int nv98_ppp_create(struct drm_device *dev);
1217
1218/* nv04_instmem.c */
1219extern int nv04_instmem_init(struct drm_device *);
1220extern void nv04_instmem_takedown(struct drm_device *);
1221extern int nv04_instmem_suspend(struct drm_device *);
1222extern void nv04_instmem_resume(struct drm_device *);
1223extern int nv04_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
1224 u32 size, u32 align);
1225extern void nv04_instmem_put(struct nouveau_gpuobj *);
1226extern int nv04_instmem_map(struct nouveau_gpuobj *);
1227extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
1228extern void nv04_instmem_flush(struct drm_device *);
1229
1230/* nv50_instmem.c */
1231extern int nv50_instmem_init(struct drm_device *);
1232extern void nv50_instmem_takedown(struct drm_device *);
1233extern int nv50_instmem_suspend(struct drm_device *);
1234extern void nv50_instmem_resume(struct drm_device *);
1235extern int nv50_instmem_get(struct nouveau_gpuobj *, struct nouveau_channel *,
1236 u32 size, u32 align);
1237extern void nv50_instmem_put(struct nouveau_gpuobj *);
1238extern int nv50_instmem_map(struct nouveau_gpuobj *);
1239extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
1240extern void nv50_instmem_flush(struct drm_device *);
1241extern void nv84_instmem_flush(struct drm_device *);
1242
1243/* nvc0_instmem.c */
1244extern int nvc0_instmem_init(struct drm_device *);
1245extern void nvc0_instmem_takedown(struct drm_device *);
1246extern int nvc0_instmem_suspend(struct drm_device *);
1247extern void nvc0_instmem_resume(struct drm_device *);
1248
1249/* nv04_mc.c */
1250extern int nv04_mc_init(struct drm_device *);
1251extern void nv04_mc_takedown(struct drm_device *);
1252
1253/* nv40_mc.c */
1254extern int nv40_mc_init(struct drm_device *);
1255extern void nv40_mc_takedown(struct drm_device *);
1256
1257/* nv50_mc.c */
1258extern int nv50_mc_init(struct drm_device *);
1259extern void nv50_mc_takedown(struct drm_device *);
1260
1261/* nv04_timer.c */
1262extern int nv04_timer_init(struct drm_device *);
1263extern uint64_t nv04_timer_read(struct drm_device *);
1264extern void nv04_timer_takedown(struct drm_device *);
1265
1266extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
1267 unsigned long arg);
1268
1269/* nv04_dac.c */
1270extern int nv04_dac_create(struct drm_connector *, struct dcb_entry *);
1271extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
1272extern int nv04_dac_output_offset(struct drm_encoder *encoder);
1273extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
1274extern bool nv04_dac_in_use(struct drm_encoder *encoder);
1275
1276/* nv04_dfp.c */
1277extern int nv04_dfp_create(struct drm_connector *, struct dcb_entry *);
1278extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent);
1279extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
1280 int head, bool dl);
1281extern void nv04_dfp_disable(struct drm_device *dev, int head);
1282extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
1283
1284/* nv04_tv.c */
1285extern int nv04_tv_identify(struct drm_device *dev, int i2c_index);
1286extern int nv04_tv_create(struct drm_connector *, struct dcb_entry *);
1287
1288/* nv17_tv.c */
1289extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *);
1290
1291/* nv04_display.c */
1292extern int nv04_display_early_init(struct drm_device *);
1293extern void nv04_display_late_takedown(struct drm_device *);
1294extern int nv04_display_create(struct drm_device *);
1295extern void nv04_display_destroy(struct drm_device *);
1296extern int nv04_display_init(struct drm_device *);
1297extern void nv04_display_fini(struct drm_device *);
1298
1299/* nvd0_display.c */
1300extern int nvd0_display_create(struct drm_device *);
1301extern void nvd0_display_destroy(struct drm_device *);
1302extern int nvd0_display_init(struct drm_device *);
1303extern void nvd0_display_fini(struct drm_device *);
1304struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int crtc);
1305void nvd0_display_flip_stop(struct drm_crtc *);
1306int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
1307 struct nouveau_channel *, u32 swap_interval);
1308
1309/* nv04_crtc.c */
1310extern int nv04_crtc_create(struct drm_device *, int index);
1311
1312/* nouveau_bo.c */
1313extern struct ttm_bo_driver nouveau_bo_driver;
1314extern void nouveau_bo_move_init(struct nouveau_channel *);
1315extern int nouveau_bo_new(struct drm_device *, int size, int align,
1316 uint32_t flags, uint32_t tile_mode,
1317 uint32_t tile_flags,
1318 struct sg_table *sg,
1319 struct nouveau_bo **);
1320extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
1321extern int nouveau_bo_unpin(struct nouveau_bo *);
1322extern int nouveau_bo_map(struct nouveau_bo *);
1323extern void nouveau_bo_unmap(struct nouveau_bo *);
1324extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t type,
1325 uint32_t busy);
1326extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
1327extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
1328extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
1329extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
1330extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
1331extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
1332 bool no_wait_reserve, bool no_wait_gpu);
1333
1334extern struct nouveau_vma *
1335nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
1336extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
1337 struct nouveau_vma *);
1338extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
1339
1340/* nouveau_gem.c */
1341extern int nouveau_gem_new(struct drm_device *, int size, int align,
1342 uint32_t domain, uint32_t tile_mode,
1343 uint32_t tile_flags, struct nouveau_bo **);
1344extern int nouveau_gem_object_new(struct drm_gem_object *);
1345extern void nouveau_gem_object_del(struct drm_gem_object *);
1346extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
1347extern void nouveau_gem_object_close(struct drm_gem_object *,
1348 struct drm_file *);
1349extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
1350 struct drm_file *);
1351extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
1352 struct drm_file *);
1353extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
1354 struct drm_file *);
1355extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
1356 struct drm_file *);
1357extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
1358 struct drm_file *);
1359
1360extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
1361 struct drm_gem_object *obj, int flags);
1362extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
1363 struct dma_buf *dma_buf);
1364
1365/* nouveau_display.c */
1366int nouveau_display_create(struct drm_device *dev);
1367void nouveau_display_destroy(struct drm_device *dev);
1368int nouveau_display_init(struct drm_device *dev);
1369void nouveau_display_fini(struct drm_device *dev);
1370int nouveau_vblank_enable(struct drm_device *dev, int crtc);
1371void nouveau_vblank_disable(struct drm_device *dev, int crtc);
1372int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1373 struct drm_pending_vblank_event *event);
1374int nouveau_finish_page_flip(struct nouveau_channel *,
1375 struct nouveau_page_flip_state *);
1376int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
1377 struct drm_mode_create_dumb *args);
1378int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
1379 uint32_t handle, uint64_t *offset);
1380int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
1381 uint32_t handle);
1382
1383/* nv10_gpio.c */
1384int nv10_gpio_init(struct drm_device *dev);
1385void nv10_gpio_fini(struct drm_device *dev);
1386int nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out);
1387int nv10_gpio_sense(struct drm_device *dev, int line);
1388void nv10_gpio_irq_enable(struct drm_device *, int line, bool on);
1389
1390/* nv50_gpio.c */
1391int nv50_gpio_init(struct drm_device *dev);
1392void nv50_gpio_fini(struct drm_device *dev);
1393int nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out);
1394int nv50_gpio_sense(struct drm_device *dev, int line);
1395void nv50_gpio_irq_enable(struct drm_device *, int line, bool on);
1396int nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out);
1397int nvd0_gpio_sense(struct drm_device *dev, int line);
1398
1399/* nv50_calc.c */
1400int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
1401 int *N1, int *M1, int *N2, int *M2, int *P);
1402int nva3_calc_pll(struct drm_device *, struct pll_lims *,
1403 int clk, int *N, int *fN, int *M, int *P);
1404
1405#ifndef ioread32_native
1406#ifdef __BIG_ENDIAN
1407#define ioread16_native ioread16be
1408#define iowrite16_native iowrite16be
1409#define ioread32_native ioread32be
1410#define iowrite32_native iowrite32be
1411#else /* def __BIG_ENDIAN */
1412#define ioread16_native ioread16
1413#define iowrite16_native iowrite16
1414#define ioread32_native ioread32
1415#define iowrite32_native iowrite32
1416#endif /* def __BIG_ENDIAN else */
1417#endif /* !ioread32_native */
1418
1419/* channel control reg access */
1420static inline u32 nvchan_rd32(struct nouveau_channel *chan, unsigned reg)
1421{
1422 return ioread32_native(chan->user + reg);
1423}
1424
1425static inline void nvchan_wr32(struct nouveau_channel *chan,
1426 unsigned reg, u32 val)
1427{
1428 iowrite32_native(val, chan->user + reg);
1429}
1430
1431/* register access */
1432static inline u32 nv_rd32(struct drm_device *dev, unsigned reg)
1433{
1434 struct drm_nouveau_private *dev_priv = dev->dev_private;
1435 return ioread32_native(dev_priv->mmio + reg);
1436}
1437
1438static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
1439{
1440 struct drm_nouveau_private *dev_priv = dev->dev_private;
1441 iowrite32_native(val, dev_priv->mmio + reg);
1442}
1443
1444static inline u32 nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
1445{
1446 u32 tmp = nv_rd32(dev, reg);
1447 nv_wr32(dev, reg, (tmp & ~mask) | val);
1448 return tmp;
1449}
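/*
 * Illustrative sketch: nv_mask() performs a read-modify-write and hands back
 * the pre-modification value.  The register offset below and the helper name
 * are hypothetical.
 */
static inline u32 example_set_enable_bit(struct drm_device *dev)
{
        /* clear bit 0, then set it; returns the old register contents */
        return nv_mask(dev, 0x000200, 0x00000001, 0x00000001);
}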
1450
1451static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
1452{
1453 struct drm_nouveau_private *dev_priv = dev->dev_private;
1454 return ioread8(dev_priv->mmio + reg);
1455}
1456
1457static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
1458{
1459 struct drm_nouveau_private *dev_priv = dev->dev_private;
1460 iowrite8(val, dev_priv->mmio + reg);
1461}
1462
1463#define nv_wait(dev, reg, mask, val) \
1464 nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
1465#define nv_wait_ne(dev, reg, mask, val) \
1466 nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
1467#define nv_wait_cb(dev, func, data) \
1468 nouveau_wait_cb(dev, 2000000000ULL, (func), (data))
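/*
 * Illustrative sketch: the nv_wait* wrappers poll with a fixed 2 second
 * (2000000000 ns) timeout and return false on expiry.  Register, mask and
 * the helper name are hypothetical.
 */
static inline int example_wait_engine_idle(struct drm_device *dev)
{
        if (!nv_wait(dev, 0x000140, 0xffffffff, 0x00000000))
                return -EBUSY;  /* callers typically log via NV_ERROR */
        return 0;
}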
1469
1470/* PRAMIN access */
1471static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
1472{
1473 struct drm_nouveau_private *dev_priv = dev->dev_private;
1474 return ioread32_native(dev_priv->ramin + offset);
1475}
1476
1477static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
1478{
1479 struct drm_nouveau_private *dev_priv = dev->dev_private;
1480 iowrite32_native(val, dev_priv->ramin + offset);
1481}
1482
1483/* object access */
1484extern u32 nv_ro32(struct nouveau_gpuobj *, u32 offset);
1485extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
1486
1487/*
1488 * Logging
1489 * Argument d is (struct drm_device *).
1490 */
1491#define NV_PRINTK(level, d, fmt, arg...) \
1492 printk(level "[" DRM_NAME "] " DRIVER_NAME " %s: " fmt, \
1493 pci_name(d->pdev), ##arg)
1494#ifndef NV_DEBUG_NOTRACE
1495#define NV_DEBUG(d, fmt, arg...) do { \
1496 if (drm_debug & DRM_UT_DRIVER) { \
1497 NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
1498 __LINE__, ##arg); \
1499 } \
1500} while (0)
1501#define NV_DEBUG_KMS(d, fmt, arg...) do { \
1502 if (drm_debug & DRM_UT_KMS) { \
1503 NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
1504 __LINE__, ##arg); \
1505 } \
1506} while (0)
1507#else
1508#define NV_DEBUG(d, fmt, arg...) do { \
1509 if (drm_debug & DRM_UT_DRIVER) \
1510 NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
1511} while (0)
1512#define NV_DEBUG_KMS(d, fmt, arg...) do { \
1513 if (drm_debug & DRM_UT_KMS) \
1514 NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
1515} while (0)
1516#endif
1517#define NV_ERROR(d, fmt, arg...) NV_PRINTK(KERN_ERR, d, fmt, ##arg)
1518#define NV_INFO(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
1519#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
1520#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
1521#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
1522#define NV_WARNONCE(d, fmt, arg...) do { \
1523 static int _warned = 0; \
1524 if (!_warned) { \
1525 NV_WARN(d, fmt, ##arg); \
1526 _warned = 1; \
1527 } \
1528} while (0)
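/*
 * Illustrative sketch: every macro above takes the drm_device first so the
 * PCI address is prefixed to each message.  NV_DEBUG* only emit when the
 * matching drm_debug bit is set; NV_WARNONCE latches after the first hit.
 * Helper name is hypothetical.
 */
static inline void example_log_usage(struct drm_device *dev, int ret)
{
        NV_DEBUG(dev, "init returned %d\n", ret);
        NV_WARNONCE(dev, "deprecated path used\n");
}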
1529
1530/* nouveau_reg_debug bitmask */
1531enum {
1532 NOUVEAU_REG_DEBUG_MC = 0x1,
1533 NOUVEAU_REG_DEBUG_VIDEO = 0x2,
1534 NOUVEAU_REG_DEBUG_FB = 0x4,
1535 NOUVEAU_REG_DEBUG_EXTDEV = 0x8,
1536 NOUVEAU_REG_DEBUG_CRTC = 0x10,
1537 NOUVEAU_REG_DEBUG_RAMDAC = 0x20,
1538 NOUVEAU_REG_DEBUG_VGACRTC = 0x40,
1539 NOUVEAU_REG_DEBUG_RMVIO = 0x80,
1540 NOUVEAU_REG_DEBUG_VGAATTR = 0x100,
1541 NOUVEAU_REG_DEBUG_EVO = 0x200,
1542 NOUVEAU_REG_DEBUG_AUXCH = 0x400
1543};
1544
1545#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \
1546 if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_##type) \
1547 NV_PRINTK(KERN_DEBUG, dev, "%s: " fmt, __func__, ##arg); \
1548} while (0)
1549
1550static inline bool
1551nv_two_heads(struct drm_device *dev)
1552{
1553 struct drm_nouveau_private *dev_priv = dev->dev_private;
1554 const int impl = dev->pci_device & 0x0ff0;
1555
1556 if (dev_priv->card_type >= NV_10 && impl != 0x0100 &&
1557 impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
1558 return true;
1559
1560 return false;
1561}
1562
1563static inline bool
1564nv_gf4_disp_arch(struct drm_device *dev)
1565{
1566 return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
1567}
1568
1569static inline bool
1570nv_two_reg_pll(struct drm_device *dev)
1571{
1572 struct drm_nouveau_private *dev_priv = dev->dev_private;
1573 const int impl = dev->pci_device & 0x0ff0;
1574
1575 if (impl == 0x0310 || impl == 0x0340 || dev_priv->card_type >= NV_40)
1576 return true;
1577 return false;
1578}
1579
1580static inline bool
1581nv_match_device(struct drm_device *dev, unsigned device,
1582 unsigned sub_vendor, unsigned sub_device)
1583{
1584 return dev->pdev->device == device &&
1585 dev->pdev->subsystem_vendor == sub_vendor &&
1586 dev->pdev->subsystem_device == sub_device;
1587}
1588
1589static inline void *
1590nv_engine(struct drm_device *dev, int engine)
1591{
1592 struct drm_nouveau_private *dev_priv = dev->dev_private;
1593 return (void *)dev_priv->eng[engine];
1594}
1595
1596/* Returns 1 if the device is one of the nv4x chips that use the 0x4497
1597 * object class (0x0baf below has a bit set for each nv4x low nibble that
1598 * does not); helpful for determining a number of other hardware features. */
1599static inline int
1600nv44_graph_class(struct drm_device *dev)
1601{
1602 struct drm_nouveau_private *dev_priv = dev->dev_private;
1603
1604 if ((dev_priv->chipset & 0xf0) == 0x60)
1605 return 1;
1606
1607 return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
1608}
1609
1610/* memory type/access flags, do not match hardware values */
1611#define NV_MEM_ACCESS_RO 1
1612#define NV_MEM_ACCESS_WO 2
1613#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
1614#define NV_MEM_ACCESS_SYS 4
1615#define NV_MEM_ACCESS_VM 8
1616#define NV_MEM_ACCESS_NOSNOOP 16
1617
1618#define NV_MEM_TARGET_VRAM 0
1619#define NV_MEM_TARGET_PCI 1
1620#define NV_MEM_TARGET_PCI_NOSNOOP 2
1621#define NV_MEM_TARGET_VM 3
1622#define NV_MEM_TARGET_GART 4
1623
1624#define NV_MEM_TYPE_VM 0x7f
1625#define NV_MEM_COMP_VM 0x03
1626
1627/* FIFO methods */
1628#define NV01_SUBCHAN_OBJECT 0x00000000
1629#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH 0x00000010
1630#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_LOW 0x00000014
1631#define NV84_SUBCHAN_SEMAPHORE_SEQUENCE 0x00000018
1632#define NV84_SUBCHAN_SEMAPHORE_TRIGGER 0x0000001c
1633#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001
1634#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
1635#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
1636#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
1637#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
1638#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
1639#define NV10_SUBCHAN_REF_CNT 0x00000050
1640#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
1641#define NV11_SUBCHAN_DMA_SEMAPHORE 0x00000060
1642#define NV11_SUBCHAN_SEMAPHORE_OFFSET 0x00000064
1643#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE 0x00000068
1644#define NV11_SUBCHAN_SEMAPHORE_RELEASE 0x0000006c
1645#define NV40_SUBCHAN_YIELD 0x00000080
1646
1647/* NV_SW object class */
1648#define NV_SW 0x0000506e
1649#define NV_SW_DMA_VBLSEM 0x0000018c
1650#define NV_SW_VBLSEM_OFFSET 0x00000400
1651#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
1652#define NV_SW_VBLSEM_RELEASE 0x00000408
1653#define NV_SW_PAGE_FLIP 0x00000500
1654
1655#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index db07b978946e..6a17bf2ba9a4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -27,23 +27,27 @@
 #ifndef __NOUVEAU_ENCODER_H__
 #define __NOUVEAU_ENCODER_H__
 
+#include <subdev/bios/dcb.h>
+
 #include <drm/drm_encoder_slave.h>
-#include "nouveau_drv.h"
+#include "nv04_display.h"
 
 #define NV_DPMS_CLEARED 0x80
 
+struct nouveau_i2c_port;
+
 struct dp_train_func {
-	void (*link_set)(struct drm_device *, struct dcb_entry *, int crtc,
+	void (*link_set)(struct drm_device *, struct dcb_output *, int crtc,
 			 int nr, u32 bw, bool enhframe);
-	void (*train_set)(struct drm_device *, struct dcb_entry *, u8 pattern);
-	void (*train_adj)(struct drm_device *, struct dcb_entry *,
+	void (*train_set)(struct drm_device *, struct dcb_output *, u8 pattern);
+	void (*train_adj)(struct drm_device *, struct dcb_output *,
 			  u8 lane, u8 swing, u8 preem);
 };
 
 struct nouveau_encoder {
 	struct drm_encoder_slave base;
 
-	struct dcb_entry *dcb;
+	struct dcb_output *dcb;
 	int or;
 
 	/* different to drm_encoder.crtc, this reflects what's
@@ -87,18 +91,16 @@ get_slave_funcs(struct drm_encoder *enc)
 }
 
 /* nouveau_dp.c */
-int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
-		     uint8_t *data, int data_nr);
 bool nouveau_dp_detect(struct drm_encoder *);
 void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
 		     struct dp_train_func *);
-u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_entry *, u8 **);
+u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_output *, u8 **);
 
 struct nouveau_connector *
 nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
-int nv50_sor_create(struct drm_connector *, struct dcb_entry *);
+int nv50_sor_create(struct drm_connector *, struct dcb_output *);
 void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
-int nv50_dac_create(struct drm_connector *, struct dcb_entry *);
+int nv50_dac_create(struct drm_connector *, struct dcb_output *);
 
 
 #endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
deleted file mode 100644
index f3fb649fe454..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2008 Maarten Maathuis.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NOUVEAU_FB_H__
-#define __NOUVEAU_FB_H__
-
-struct nouveau_framebuffer {
-	struct drm_framebuffer base;
-	struct nouveau_bo *nvbo;
-	struct nouveau_vma vma;
-	u32 r_dma;
-	u32 r_format;
-	u32 r_pitch;
-};
-
-static inline struct nouveau_framebuffer *
-nouveau_framebuffer(struct drm_framebuffer *fb)
-{
-	return container_of(fb, struct nouveau_framebuffer, base);
-}
-
-int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
-			     struct drm_mode_fb_cmd2 *mode_cmd, struct nouveau_bo *nvbo);
-#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 7e41a4006087..67a1a069de28 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -42,19 +42,30 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-#include "nouveau_crtc.h"
-#include "nouveau_fb.h"
+
+#include "nouveau_drm.h"
+#include "nouveau_gem.h"
+#include "nouveau_bo.h"
 #include "nouveau_fbcon.h"
-#include "nouveau_dma.h"
+#include "nouveau_chan.h"
+
+#include "nouveau_crtc.h"
+
+#include <core/client.h>
+#include <core/device.h>
+
+#include <subdev/fb.h>
+
+MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
+static int nouveau_nofbaccel = 0;
+module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
 
 static void
 nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
-	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	int ret;
 
 	if (info->state != FBINFO_STATE_RUNNING)
@@ -62,15 +73,15 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 
 	ret = -ENODEV;
 	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-	    mutex_trylock(&dev_priv->channel->mutex)) {
-		if (dev_priv->card_type < NV_50)
+	    mutex_trylock(&drm->client.mutex)) {
+		if (device->card_type < NV_50)
 			ret = nv04_fbcon_fillrect(info, rect);
 		else
-		if (dev_priv->card_type < NV_C0)
+		if (device->card_type < NV_C0)
 			ret = nv50_fbcon_fillrect(info, rect);
 		else
 			ret = nvc0_fbcon_fillrect(info, rect);
-		mutex_unlock(&dev_priv->channel->mutex);
+		mutex_unlock(&drm->client.mutex);
 	}
 
 	if (ret == 0)
@@ -84,9 +95,9 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 static void
 nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
 {
-	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	int ret;
 
 	if (info->state != FBINFO_STATE_RUNNING)
@@ -94,15 +105,15 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
 
 	ret = -ENODEV;
 	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-	    mutex_trylock(&dev_priv->channel->mutex)) {
-		if (dev_priv->card_type < NV_50)
+	    mutex_trylock(&drm->client.mutex)) {
+		if (device->card_type < NV_50)
 			ret = nv04_fbcon_copyarea(info, image);
 		else
-		if (dev_priv->card_type < NV_C0)
+		if (device->card_type < NV_C0)
 			ret = nv50_fbcon_copyarea(info, image);
 		else
 			ret = nvc0_fbcon_copyarea(info, image);
-		mutex_unlock(&dev_priv->channel->mutex);
+		mutex_unlock(&drm->client.mutex);
 	}
 
 	if (ret == 0)
@@ -116,9 +127,9 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
 static void
 nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
-	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	int ret;
 
 	if (info->state != FBINFO_STATE_RUNNING)
@@ -126,15 +137,15 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 
 	ret = -ENODEV;
 	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-	    mutex_trylock(&dev_priv->channel->mutex)) {
-		if (dev_priv->card_type < NV_50)
+	    mutex_trylock(&drm->client.mutex)) {
+		if (device->card_type < NV_50)
 			ret = nv04_fbcon_imageblit(info, image);
 		else
-		if (dev_priv->card_type < NV_C0)
+		if (device->card_type < NV_C0)
 			ret = nv50_fbcon_imageblit(info, image);
 		else
 			ret = nvc0_fbcon_imageblit(info, image);
-		mutex_unlock(&dev_priv->channel->mutex);
+		mutex_unlock(&drm->client.mutex);
 	}
 
 	if (ret == 0)
@@ -148,10 +159,9 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 static int
 nouveau_fbcon_sync(struct fb_info *info)
 {
-	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_fbdev *fbcon = info->par;
+	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	if (!chan || !chan->accel_done || in_interrupt() ||
@@ -159,11 +169,11 @@ nouveau_fbcon_sync(struct fb_info *info)
 	    info->flags & FBINFO_HWACCEL_DISABLED)
 		return 0;
 
-	if (!mutex_trylock(&chan->mutex))
+	if (!mutex_trylock(&drm->client.mutex))
 		return 0;
 
 	ret = nouveau_channel_idle(chan);
-	mutex_unlock(&chan->mutex);
+	mutex_unlock(&drm->client.mutex);
 	if (ret) {
 		nouveau_fbcon_gpu_lockup(info);
 		return 0;
@@ -223,9 +233,9 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 }
 
 static void
-nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
 {
-	struct fb_info *info = nfbdev->helper.fbdev;
+	struct fb_info *info = fbcon->helper.fbdev;
 	struct fb_fillrect rect;
 
 	/* Clear the entire fbcon. The drm will program every connector
@@ -241,11 +251,12 @@ nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 }
 
 static int
-nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+nouveau_fbcon_create(struct nouveau_fbdev *fbcon,
 		     struct drm_fb_helper_surface_size *sizes)
 {
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_device *dev = fbcon->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nv_device(drm->device);
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct nouveau_framebuffer *nouveau_fb;
@@ -253,7 +264,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	struct nouveau_bo *nvbo;
 	struct drm_mode_fb_cmd2 mode_cmd;
 	struct pci_dev *pdev = dev->pdev;
-	struct device *device = &pdev->dev;
 	int size, ret;
 
 	mode_cmd.width = sizes->surface_width;
@@ -271,37 +281,38 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
 			      0, 0x0000, &nvbo);
 	if (ret) {
-		NV_ERROR(dev, "failed to allocate framebuffer\n");
+		NV_ERROR(drm, "failed to allocate framebuffer\n");
 		goto out;
 	}
 
 	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 	if (ret) {
-		NV_ERROR(dev, "failed to pin fb: %d\n", ret);
+		NV_ERROR(drm, "failed to pin fb: %d\n", ret);
 		nouveau_bo_ref(NULL, &nvbo);
 		goto out;
 	}
 
 	ret = nouveau_bo_map(nvbo);
 	if (ret) {
-		NV_ERROR(dev, "failed to map fb: %d\n", ret);
+		NV_ERROR(drm, "failed to map fb: %d\n", ret);
 		nouveau_bo_unpin(nvbo);
 		nouveau_bo_ref(NULL, &nvbo);
 		goto out;
 	}
 
-	chan = nouveau_nofbaccel ? NULL : dev_priv->channel;
-	if (chan && dev_priv->card_type >= NV_50) {
-		ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma);
+	chan = nouveau_nofbaccel ? NULL : drm->channel;
+	if (chan && device->card_type >= NV_50) {
+		ret = nouveau_bo_vma_add(nvbo, nv_client(chan->cli)->vm,
+					 &fbcon->nouveau_fb.vma);
 		if (ret) {
-			NV_ERROR(dev, "failed to map fb into chan: %d\n", ret);
+			NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
 			chan = NULL;
 		}
 	}
 
 	mutex_lock(&dev->struct_mutex);
 
-	info = framebuffer_alloc(0, device);
+	info = framebuffer_alloc(0, &pdev->dev);
 	if (!info) {
 		ret = -ENOMEM;
 		goto out_unref;
@@ -313,16 +324,16 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 		goto out_unref;
 	}
 
-	info->par = nfbdev;
+	info->par = fbcon;
 
-	nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);
+	nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
 
-	nouveau_fb = &nfbdev->nouveau_fb;
+	nouveau_fb = &fbcon->nouveau_fb;
 	fb = &nouveau_fb->base;
 
 	/* setup helper */
-	nfbdev->helper.fb = fb;
-	nfbdev->helper.fbdev = info;
+	fbcon->helper.fb = fb;
+	fbcon->helper.fbdev = info;
 
 	strcpy(info->fix.id, "nouveaufb");
 	if (nouveau_nofbaccel)
@@ -341,25 +352,18 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 	info->screen_size = size;
 
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
-	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
-
-	/* Set aperture base/size for vesafb takeover */
-	info->apertures = dev_priv->apertures;
-	if (!info->apertures) {
-		ret = -ENOMEM;
-		goto out_unref;
-	}
+	drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
 
 	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
 	mutex_unlock(&dev->struct_mutex);
 
-	if (dev_priv->channel && !nouveau_nofbaccel) {
+	if (chan) {
 		ret = -ENODEV;
-		if (dev_priv->card_type < NV_50)
+		if (device->card_type < NV_50)
360 ret = nv04_fbcon_accel_init(info); 364 ret = nv04_fbcon_accel_init(info);
361 else 365 else
362 if (dev_priv->card_type < NV_C0) 366 if (device->card_type < NV_C0)
363 ret = nv50_fbcon_accel_init(info); 367 ret = nv50_fbcon_accel_init(info);
364 else 368 else
365 ret = nvc0_fbcon_accel_init(info); 369 ret = nvc0_fbcon_accel_init(info);
@@ -368,13 +372,12 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
368 info->fbops = &nouveau_fbcon_ops; 372 info->fbops = &nouveau_fbcon_ops;
369 } 373 }
370 374
371 nouveau_fbcon_zfill(dev, nfbdev); 375 nouveau_fbcon_zfill(dev, fbcon);
372 376
373 /* To allow resizeing without swapping buffers */ 377 /* To allow resizeing without swapping buffers */
374 NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n", 378 NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n",
375 nouveau_fb->base.width, 379 nouveau_fb->base.width, nouveau_fb->base.height,
376 nouveau_fb->base.height, 380 nvbo->bo.offset, nvbo);
377 nvbo->bo.offset, nvbo);
378 381
379 vga_switcheroo_client_fb_set(dev->pdev, info); 382 vga_switcheroo_client_fb_set(dev->pdev, info);
380 return 0; 383 return 0;
@@ -389,12 +392,12 @@ static int
389nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper, 392nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
390 struct drm_fb_helper_surface_size *sizes) 393 struct drm_fb_helper_surface_size *sizes)
391{ 394{
392 struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper; 395 struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
393 int new_fb = 0; 396 int new_fb = 0;
394 int ret; 397 int ret;
395 398
396 if (!helper->fb) { 399 if (!helper->fb) {
397 ret = nouveau_fbcon_create(nfbdev, sizes); 400 ret = nouveau_fbcon_create(fbcon, sizes);
398 if (ret) 401 if (ret)
399 return ret; 402 return ret;
400 new_fb = 1; 403 new_fb = 1;
@@ -405,18 +408,18 @@ nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
405void 408void
406nouveau_fbcon_output_poll_changed(struct drm_device *dev) 409nouveau_fbcon_output_poll_changed(struct drm_device *dev)
407{ 410{
408 struct drm_nouveau_private *dev_priv = dev->dev_private; 411 struct nouveau_drm *drm = nouveau_drm(dev);
409 drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper); 412 drm_fb_helper_hotplug_event(&drm->fbcon->helper);
410} 413}
411 414
412static int 415static int
413nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) 416nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
414{ 417{
415 struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb; 418 struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
416 struct fb_info *info; 419 struct fb_info *info;
417 420
418 if (nfbdev->helper.fbdev) { 421 if (fbcon->helper.fbdev) {
419 info = nfbdev->helper.fbdev; 422 info = fbcon->helper.fbdev;
420 unregister_framebuffer(info); 423 unregister_framebuffer(info);
421 if (info->cmap.len) 424 if (info->cmap.len)
422 fb_dealloc_cmap(&info->cmap); 425 fb_dealloc_cmap(&info->cmap);
@@ -429,17 +432,17 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
429 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); 432 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
430 nouveau_fb->nvbo = NULL; 433 nouveau_fb->nvbo = NULL;
431 } 434 }
432 drm_fb_helper_fini(&nfbdev->helper); 435 drm_fb_helper_fini(&fbcon->helper);
433 drm_framebuffer_cleanup(&nouveau_fb->base); 436 drm_framebuffer_cleanup(&nouveau_fb->base);
434 return 0; 437 return 0;
435} 438}
436 439
437void nouveau_fbcon_gpu_lockup(struct fb_info *info) 440void nouveau_fbcon_gpu_lockup(struct fb_info *info)
438{ 441{
439 struct nouveau_fbdev *nfbdev = info->par; 442 struct nouveau_fbdev *fbcon = info->par;
440 struct drm_device *dev = nfbdev->dev; 443 struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
441 444
442 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n"); 445 NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
443 info->flags |= FBINFO_HWACCEL_DISABLED; 446 info->flags |= FBINFO_HWACCEL_DISABLED;
444} 447}
445 448
@@ -450,74 +453,81 @@ static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
450}; 453};
451 454
452 455
453int nouveau_fbcon_init(struct drm_device *dev) 456int
457nouveau_fbcon_init(struct drm_device *dev)
454{ 458{
455 struct drm_nouveau_private *dev_priv = dev->dev_private; 459 struct nouveau_drm *drm = nouveau_drm(dev);
456 struct nouveau_fbdev *nfbdev; 460 struct nouveau_fb *pfb = nouveau_fb(drm->device);
461 struct nouveau_fbdev *fbcon;
457 int preferred_bpp; 462 int preferred_bpp;
458 int ret; 463 int ret;
459 464
460 nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); 465 if (!dev->mode_config.num_crtc)
461 if (!nfbdev) 466 return 0;
467
468 fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
469 if (!fbcon)
462 return -ENOMEM; 470 return -ENOMEM;
463 471
464 nfbdev->dev = dev; 472 fbcon->dev = dev;
465 dev_priv->nfbdev = nfbdev; 473 drm->fbcon = fbcon;
466 nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs; 474 fbcon->helper.funcs = &nouveau_fbcon_helper_funcs;
467 475
468 ret = drm_fb_helper_init(dev, &nfbdev->helper, 476 ret = drm_fb_helper_init(dev, &fbcon->helper,
469 dev->mode_config.num_crtc, 4); 477 dev->mode_config.num_crtc, 4);
470 if (ret) { 478 if (ret) {
471 kfree(nfbdev); 479 kfree(fbcon);
472 return ret; 480 return ret;
473 } 481 }
474 482
475 drm_fb_helper_single_add_all_connectors(&nfbdev->helper); 483 drm_fb_helper_single_add_all_connectors(&fbcon->helper);
476 484
477 if (dev_priv->vram_size <= 32 * 1024 * 1024) 485 if (pfb->ram.size <= 32 * 1024 * 1024)
478 preferred_bpp = 8; 486 preferred_bpp = 8;
479 else if (dev_priv->vram_size <= 64 * 1024 * 1024) 487 else
488 if (pfb->ram.size <= 64 * 1024 * 1024)
480 preferred_bpp = 16; 489 preferred_bpp = 16;
481 else 490 else
482 preferred_bpp = 32; 491 preferred_bpp = 32;
483 492
484 drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp); 493 drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
485 return 0; 494 return 0;
486} 495}
487 496
488void nouveau_fbcon_fini(struct drm_device *dev) 497void
498nouveau_fbcon_fini(struct drm_device *dev)
489{ 499{
490 struct drm_nouveau_private *dev_priv = dev->dev_private; 500 struct nouveau_drm *drm = nouveau_drm(dev);
491 501
492 if (!dev_priv->nfbdev) 502 if (!drm->fbcon)
493 return; 503 return;
494 504
495 nouveau_fbcon_destroy(dev, dev_priv->nfbdev); 505 nouveau_fbcon_destroy(dev, drm->fbcon);
496 kfree(dev_priv->nfbdev); 506 kfree(drm->fbcon);
497 dev_priv->nfbdev = NULL; 507 drm->fbcon = NULL;
498} 508}
499 509
500void nouveau_fbcon_save_disable_accel(struct drm_device *dev) 510void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
501{ 511{
502 struct drm_nouveau_private *dev_priv = dev->dev_private; 512 struct nouveau_drm *drm = nouveau_drm(dev);
503 513
504 dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags; 514 drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
505 dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; 515 drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
506} 516}
507 517
508void nouveau_fbcon_restore_accel(struct drm_device *dev) 518void nouveau_fbcon_restore_accel(struct drm_device *dev)
509{ 519{
510 struct drm_nouveau_private *dev_priv = dev->dev_private; 520 struct nouveau_drm *drm = nouveau_drm(dev);
511 dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags; 521 drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
512} 522}
513 523
514void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) 524void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
515{ 525{
516 struct drm_nouveau_private *dev_priv = dev->dev_private; 526 struct nouveau_drm *drm = nouveau_drm(dev);
517 console_lock(); 527 console_lock();
518 if (state == 0) 528 if (state == 0)
519 nouveau_fbcon_save_disable_accel(dev); 529 nouveau_fbcon_save_disable_accel(dev);
520 fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state); 530 fb_set_suspend(drm->fbcon->helper.fbdev, state);
521 if (state == 1) 531 if (state == 1)
522 nouveau_fbcon_restore_accel(dev); 532 nouveau_fbcon_restore_accel(dev);
523 console_unlock(); 533 console_unlock();
@@ -525,6 +535,6 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
525 535
526void nouveau_fbcon_zfill_all(struct drm_device *dev) 536void nouveau_fbcon_zfill_all(struct drm_device *dev)
527{ 537{
528 struct drm_nouveau_private *dev_priv = dev->dev_private; 538 struct nouveau_drm *drm = nouveau_drm(dev);
529 nouveau_fbcon_zfill(dev, dev_priv->nfbdev); 539 nouveau_fbcon_zfill(dev, drm->fbcon);
530} 540}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 1f2d27893438..fdfc0c94fbcc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -29,7 +29,8 @@
29 29
30#include <drm/drm_fb_helper.h> 30#include <drm/drm_fb_helper.h>
31 31
32#include "nouveau_fb.h" 32#include "nouveau_display.h"
33
33struct nouveau_fbdev { 34struct nouveau_fbdev {
34 struct drm_fb_helper helper; 35 struct drm_fb_helper helper;
35 struct nouveau_framebuffer nouveau_fb; 36 struct nouveau_framebuffer nouveau_fb;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 614df7b958ca..1d049be79f74 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -29,11 +29,9 @@
29#include <linux/ktime.h> 29#include <linux/ktime.h>
30#include <linux/hrtimer.h> 30#include <linux/hrtimer.h>
31 31
32#include "nouveau_drv.h" 32#include "nouveau_drm.h"
33#include "nouveau_ramht.h"
34#include "nouveau_fence.h"
35#include "nouveau_software.h"
36#include "nouveau_dma.h" 33#include "nouveau_dma.h"
34#include "nouveau_fence.h"
37 35
38void 36void
39nouveau_fence_context_del(struct nouveau_fence_chan *fctx) 37nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
@@ -53,16 +51,16 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
53void 51void
54nouveau_fence_context_new(struct nouveau_fence_chan *fctx) 52nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
55{ 53{
54 INIT_LIST_HEAD(&fctx->flip);
56 INIT_LIST_HEAD(&fctx->pending); 55 INIT_LIST_HEAD(&fctx->pending);
57 spin_lock_init(&fctx->lock); 56 spin_lock_init(&fctx->lock);
58} 57}
59 58
60void 59static void
61nouveau_fence_update(struct nouveau_channel *chan) 60nouveau_fence_update(struct nouveau_channel *chan)
62{ 61{
63 struct drm_device *dev = chan->dev; 62 struct nouveau_fence_priv *priv = chan->drm->fence;
64 struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE); 63 struct nouveau_fence_chan *fctx = chan->fence;
65 struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
66 struct nouveau_fence *fence, *fnext; 64 struct nouveau_fence *fence, *fnext;
67 65
68 spin_lock(&fctx->lock); 66 spin_lock(&fctx->lock);
@@ -82,9 +80,8 @@ nouveau_fence_update(struct nouveau_channel *chan)
82int 80int
83nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) 81nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
84{ 82{
85 struct drm_device *dev = chan->dev; 83 struct nouveau_fence_priv *priv = chan->drm->fence;
86 struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE); 84 struct nouveau_fence_chan *fctx = chan->fence;
87 struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
88 int ret; 85 int ret;
89 86
90 fence->channel = chan; 87 fence->channel = chan;
@@ -146,19 +143,17 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
146int 143int
147nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan) 144nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
148{ 145{
149 struct drm_device *dev = chan->dev; 146 struct nouveau_fence_priv *priv = chan->drm->fence;
150 struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
151 struct nouveau_channel *prev; 147 struct nouveau_channel *prev;
152 int ret = 0; 148 int ret = 0;
153 149
154 prev = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL; 150 prev = fence ? fence->channel : NULL;
155 if (prev) { 151 if (prev) {
156 if (unlikely(prev != chan && !nouveau_fence_done(fence))) { 152 if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
157 ret = priv->sync(fence, prev, chan); 153 ret = priv->sync(fence, prev, chan);
158 if (unlikely(ret)) 154 if (unlikely(ret))
159 ret = nouveau_fence_wait(fence, true, false); 155 ret = nouveau_fence_wait(fence, true, false);
160 } 156 }
161 nouveau_channel_put_unlocked(&prev);
162 } 157 }
163 158
164 return ret; 159 return ret;
@@ -192,7 +187,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
192 struct nouveau_fence *fence; 187 struct nouveau_fence *fence;
193 int ret = 0; 188 int ret = 0;
194 189
195 if (unlikely(!chan->engctx[NVOBJ_ENGINE_FENCE])) 190 if (unlikely(!chan->fence))
196 return -ENODEV; 191 return -ENODEV;
197 192
198 fence = kzalloc(sizeof(*fence), GFP_KERNEL); 193 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
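
The fence paths above now reach their backend the same way throughout: the vtable via chan->drm->fence and the per-channel context via chan->fence, replacing the old engctx[NVOBJ_ENGINE_FENCE] indirection. A hedged sketch of the reap loop this implies for the now-static nouveau_fence_update(); only the locking, the list fields and the read() hook come from the diff, the exact retirement steps are assumptions:

/* Sketch: retire every pending fence whose sequence the backend's
 * read() hook reports the hardware has already passed. */
static void
fence_update_sketch(struct nouveau_channel *chan)
{
	struct nouveau_fence_priv *priv = chan->drm->fence;
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence *fence, *fnext;

	spin_lock(&fctx->lock);
	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
		/* pending is ordered by sequence; stop at the first
		 * fence the hardware has not yet reached */
		if (priv->read(chan) < fence->sequence)
			break;
		fence->channel = NULL;
		list_del(&fence->head);
		nouveau_fence_unref(&fence);
	}
	spin_unlock(&fctx->lock);
}

Note also that nouveau_fence_sync() no longer takes a channel reference: with the unlocked get/put helpers gone, it reads fence->channel directly, presumably relying on the caller's locking.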
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 82ba733393ae..bedafd1c9539 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,6 +1,8 @@
1#ifndef __NOUVEAU_FENCE_H__ 1#ifndef __NOUVEAU_FENCE_H__
2#define __NOUVEAU_FENCE_H__ 2#define __NOUVEAU_FENCE_H__
3 3
4struct nouveau_drm;
5
4struct nouveau_fence { 6struct nouveau_fence {
5 struct list_head head; 7 struct list_head head;
6 struct kref kref; 8 struct kref kref;
@@ -22,31 +24,48 @@ int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
22bool nouveau_fence_done(struct nouveau_fence *); 24bool nouveau_fence_done(struct nouveau_fence *);
23int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); 25int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
24int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *); 26int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
25void nouveau_fence_idle(struct nouveau_channel *);
26void nouveau_fence_update(struct nouveau_channel *);
27 27
28struct nouveau_fence_chan { 28struct nouveau_fence_chan {
29 struct list_head pending; 29 struct list_head pending;
30 struct list_head flip;
31
30 spinlock_t lock; 32 spinlock_t lock;
31 u32 sequence; 33 u32 sequence;
32}; 34};
33 35
34struct nouveau_fence_priv { 36struct nouveau_fence_priv {
35 struct nouveau_exec_engine engine; 37 void (*dtor)(struct nouveau_drm *);
36 int (*emit)(struct nouveau_fence *); 38 bool (*suspend)(struct nouveau_drm *);
37 int (*sync)(struct nouveau_fence *, struct nouveau_channel *, 39 void (*resume)(struct nouveau_drm *);
38 struct nouveau_channel *); 40 int (*context_new)(struct nouveau_channel *);
39 u32 (*read)(struct nouveau_channel *); 41 void (*context_del)(struct nouveau_channel *);
42 int (*emit)(struct nouveau_fence *);
43 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
44 struct nouveau_channel *);
45 u32 (*read)(struct nouveau_channel *);
40}; 46};
41 47
48#define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
49
42void nouveau_fence_context_new(struct nouveau_fence_chan *); 50void nouveau_fence_context_new(struct nouveau_fence_chan *);
43void nouveau_fence_context_del(struct nouveau_fence_chan *); 51void nouveau_fence_context_del(struct nouveau_fence_chan *);
44 52
45int nv04_fence_create(struct drm_device *dev); 53int nv04_fence_create(struct nouveau_drm *);
46int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32); 54int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
47 55
48int nv10_fence_create(struct drm_device *dev); 56int nv10_fence_emit(struct nouveau_fence *);
49int nv84_fence_create(struct drm_device *dev); 57int nv17_fence_sync(struct nouveau_fence *, struct nouveau_channel *,
50int nvc0_fence_create(struct drm_device *dev); 58 struct nouveau_channel *);
59u32 nv10_fence_read(struct nouveau_channel *);
60void nv10_fence_context_del(struct nouveau_channel *);
61void nv10_fence_destroy(struct nouveau_drm *);
62int nv10_fence_create(struct nouveau_drm *);
63
64int nv50_fence_create(struct nouveau_drm *);
65int nv84_fence_create(struct nouveau_drm *);
66int nvc0_fence_create(struct nouveau_drm *);
67u64 nvc0_fence_crtc(struct nouveau_channel *, int crtc);
68
69int nouveau_flip_complete(void *chan);
51 70
52#endif 71#endif
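
The header change turns nouveau_fence_priv from a wrapper around the old exec-engine into a plain vtable hung off nouveau_drm, with the nouveau_fence(drm) macro as the cast callers use to reach it. A hedged sketch of how a backend might populate it, using the nv10 hooks the header now exports; the allocation flow is an assumption (the real backends embed this base inside their own priv structs):

int
nv10_fence_create_sketch(struct nouveau_drm *drm)
{
	struct nouveau_fence_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dtor = nv10_fence_destroy;
	priv->context_del = nv10_fence_context_del;
	priv->emit = nv10_fence_emit;
	priv->read = nv10_fence_read;	/* hook set abbreviated */
	drm->fence = priv;
	return 0;
}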
diff --git a/drivers/gpu/drm/nouveau/nouveau_fifo.h b/drivers/gpu/drm/nouveau/nouveau_fifo.h
deleted file mode 100644
index ce99cab2f257..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_fifo.h
+++ /dev/null
@@ -1,32 +0,0 @@
1#ifndef __NOUVEAU_FIFO_H__
2#define __NOUVEAU_FIFO_H__
3
4struct nouveau_fifo_priv {
5 struct nouveau_exec_engine base;
6 u32 channels;
7};
8
9struct nouveau_fifo_chan {
10};
11
12bool nv04_fifo_cache_pull(struct drm_device *, bool);
13void nv04_fifo_context_del(struct nouveau_channel *, int);
14int nv04_fifo_fini(struct drm_device *, int, bool);
15int nv04_fifo_init(struct drm_device *, int);
16void nv04_fifo_isr(struct drm_device *);
17void nv04_fifo_destroy(struct drm_device *, int);
18
19void nv50_fifo_playlist_update(struct drm_device *);
20void nv50_fifo_destroy(struct drm_device *, int);
21void nv50_fifo_tlb_flush(struct drm_device *, int);
22
23int nv04_fifo_create(struct drm_device *);
24int nv10_fifo_create(struct drm_device *);
25int nv17_fifo_create(struct drm_device *);
26int nv40_fifo_create(struct drm_device *);
27int nv50_fifo_create(struct drm_device *);
28int nv84_fifo_create(struct drm_device *);
29int nvc0_fifo_create(struct drm_device *);
30int nve0_fifo_create(struct drm_device *);
31
32#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 8461a4f5710f..5e2f52158f19 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -23,15 +23,18 @@
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * 24 *
25 */ 25 */
26
26#include <linux/dma-buf.h> 27#include <linux/dma-buf.h>
27#include <drm/drmP.h>
28 28
29#include "nouveau_drv.h" 29#include <subdev/fb.h>
30#include <drm/nouveau_drm.h> 30
31#include "nouveau_drm.h"
31#include "nouveau_dma.h" 32#include "nouveau_dma.h"
32#include "nouveau_fence.h" 33#include "nouveau_fence.h"
34#include "nouveau_abi16.h"
33 35
34#define nouveau_gem_pushbuf_sync(chan) 0 36#include "nouveau_ttm.h"
37#include "nouveau_gem.h"
35 38
36int 39int
37nouveau_gem_object_new(struct drm_gem_object *gem) 40nouveau_gem_object_new(struct drm_gem_object *gem)
@@ -66,19 +69,19 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
66int 69int
67nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) 70nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
68{ 71{
69 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); 72 struct nouveau_cli *cli = nouveau_cli(file_priv);
70 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 73 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
71 struct nouveau_vma *vma; 74 struct nouveau_vma *vma;
72 int ret; 75 int ret;
73 76
74 if (!fpriv->vm) 77 if (!cli->base.vm)
75 return 0; 78 return 0;
76 79
77 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); 80 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
78 if (ret) 81 if (ret)
79 return ret; 82 return ret;
80 83
81 vma = nouveau_bo_vma_find(nvbo, fpriv->vm); 84 vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
82 if (!vma) { 85 if (!vma) {
83 vma = kzalloc(sizeof(*vma), GFP_KERNEL); 86 vma = kzalloc(sizeof(*vma), GFP_KERNEL);
84 if (!vma) { 87 if (!vma) {
@@ -86,7 +89,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
86 goto out; 89 goto out;
87 } 90 }
88 91
89 ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma); 92 ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
90 if (ret) { 93 if (ret) {
91 kfree(vma); 94 kfree(vma);
92 goto out; 95 goto out;
@@ -103,19 +106,19 @@ out:
103void 106void
104nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) 107nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
105{ 108{
106 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); 109 struct nouveau_cli *cli = nouveau_cli(file_priv);
107 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 110 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
108 struct nouveau_vma *vma; 111 struct nouveau_vma *vma;
109 int ret; 112 int ret;
110 113
111 if (!fpriv->vm) 114 if (!cli->base.vm)
112 return; 115 return;
113 116
114 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); 117 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
115 if (ret) 118 if (ret)
116 return; 119 return;
117 120
118 vma = nouveau_bo_vma_find(nvbo, fpriv->vm); 121 vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
119 if (vma) { 122 if (vma) {
120 if (--vma->refcount == 0) { 123 if (--vma->refcount == 0) {
121 nouveau_bo_vma_del(nvbo, vma); 124 nouveau_bo_vma_del(nvbo, vma);
@@ -130,7 +133,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
130 uint32_t tile_mode, uint32_t tile_flags, 133 uint32_t tile_mode, uint32_t tile_flags,
131 struct nouveau_bo **pnvbo) 134 struct nouveau_bo **pnvbo)
132{ 135{
133 struct drm_nouveau_private *dev_priv = dev->dev_private; 136 struct nouveau_drm *drm = nouveau_drm(dev);
134 struct nouveau_bo *nvbo; 137 struct nouveau_bo *nvbo;
135 u32 flags = 0; 138 u32 flags = 0;
136 int ret; 139 int ret;
@@ -154,7 +157,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
154 */ 157 */
155 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | 158 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
156 NOUVEAU_GEM_DOMAIN_GART; 159 NOUVEAU_GEM_DOMAIN_GART;
157 if (dev_priv->card_type >= NV_50) 160 if (nv_device(drm->device)->card_type >= NV_50)
158 nvbo->valid_domains &= domain; 161 nvbo->valid_domains &= domain;
159 162
160 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); 163 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
@@ -172,7 +175,7 @@ static int
172nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, 175nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
173 struct drm_nouveau_gem_info *rep) 176 struct drm_nouveau_gem_info *rep)
174{ 177{
175 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); 178 struct nouveau_cli *cli = nouveau_cli(file_priv);
176 struct nouveau_bo *nvbo = nouveau_gem_object(gem); 179 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
177 struct nouveau_vma *vma; 180 struct nouveau_vma *vma;
178 181
@@ -182,8 +185,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
182 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; 185 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
183 186
184 rep->offset = nvbo->bo.offset; 187 rep->offset = nvbo->bo.offset;
185 if (fpriv->vm) { 188 if (cli->base.vm) {
186 vma = nouveau_bo_vma_find(nvbo, fpriv->vm); 189 vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
187 if (!vma) 190 if (!vma)
188 return -EINVAL; 191 return -EINVAL;
189 192
@@ -201,15 +204,16 @@ int
201nouveau_gem_ioctl_new(struct drm_device *dev, void *data, 204nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
202 struct drm_file *file_priv) 205 struct drm_file *file_priv)
203{ 206{
204 struct drm_nouveau_private *dev_priv = dev->dev_private; 207 struct nouveau_drm *drm = nouveau_drm(dev);
208 struct nouveau_fb *pfb = nouveau_fb(drm->device);
205 struct drm_nouveau_gem_new *req = data; 209 struct drm_nouveau_gem_new *req = data;
206 struct nouveau_bo *nvbo = NULL; 210 struct nouveau_bo *nvbo = NULL;
207 int ret = 0; 211 int ret = 0;
208 212
209 dev_priv->ttm.bdev.dev_mapping = dev->dev_mapping; 213 drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
210 214
211 if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) { 215 if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
212 NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags); 216 NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
213 return -EINVAL; 217 return -EINVAL;
214 } 218 }
215 219
@@ -311,16 +315,16 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
311 struct drm_nouveau_gem_pushbuf_bo *pbbo, 315 struct drm_nouveau_gem_pushbuf_bo *pbbo,
312 int nr_buffers, struct validate_op *op) 316 int nr_buffers, struct validate_op *op)
313{ 317{
314 struct drm_device *dev = chan->dev; 318 struct drm_device *dev = chan->drm->dev;
315 struct drm_nouveau_private *dev_priv = dev->dev_private; 319 struct nouveau_drm *drm = nouveau_drm(dev);
316 uint32_t sequence; 320 uint32_t sequence;
317 int trycnt = 0; 321 int trycnt = 0;
318 int ret, i; 322 int ret, i;
319 323
320 sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence); 324 sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
321retry: 325retry:
322 if (++trycnt > 100000) { 326 if (++trycnt > 100000) {
323 NV_ERROR(dev, "%s failed and gave up.\n", __func__); 327 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
324 return -EINVAL; 328 return -EINVAL;
325 } 329 }
326 330
@@ -331,14 +335,14 @@ retry:
331 335
332 gem = drm_gem_object_lookup(dev, file_priv, b->handle); 336 gem = drm_gem_object_lookup(dev, file_priv, b->handle);
333 if (!gem) { 337 if (!gem) {
334 NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle); 338 NV_ERROR(drm, "Unknown handle 0x%08x\n", b->handle);
335 validate_fini(op, NULL); 339 validate_fini(op, NULL);
336 return -ENOENT; 340 return -ENOENT;
337 } 341 }
338 nvbo = gem->driver_private; 342 nvbo = gem->driver_private;
339 343
340 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { 344 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
341 NV_ERROR(dev, "multiple instances of buffer %d on " 345 NV_ERROR(drm, "multiple instances of buffer %d on "
342 "validation list\n", b->handle); 346 "validation list\n", b->handle);
343 drm_gem_object_unreference_unlocked(gem); 347 drm_gem_object_unreference_unlocked(gem);
344 validate_fini(op, NULL); 348 validate_fini(op, NULL);
@@ -353,7 +357,7 @@ retry:
353 drm_gem_object_unreference_unlocked(gem); 357 drm_gem_object_unreference_unlocked(gem);
354 if (unlikely(ret)) { 358 if (unlikely(ret)) {
355 if (ret != -ERESTARTSYS) 359 if (ret != -ERESTARTSYS)
356 NV_ERROR(dev, "fail reserve\n"); 360 NV_ERROR(drm, "fail reserve\n");
357 return ret; 361 return ret;
358 } 362 }
359 goto retry; 363 goto retry;
@@ -372,7 +376,7 @@ retry:
372 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART) 376 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
373 list_add_tail(&nvbo->entry, &op->gart_list); 377 list_add_tail(&nvbo->entry, &op->gart_list);
374 else { 378 else {
375 NV_ERROR(dev, "invalid valid domains: 0x%08x\n", 379 NV_ERROR(drm, "invalid valid domains: 0x%08x\n",
376 b->valid_domains); 380 b->valid_domains);
377 list_add_tail(&nvbo->entry, &op->both_list); 381 list_add_tail(&nvbo->entry, &op->both_list);
378 validate_fini(op, NULL); 382 validate_fini(op, NULL);
@@ -406,10 +410,9 @@ static int
406validate_list(struct nouveau_channel *chan, struct list_head *list, 410validate_list(struct nouveau_channel *chan, struct list_head *list,
407 struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr) 411 struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
408{ 412{
409 struct drm_nouveau_private *dev_priv = chan->dev->dev_private; 413 struct nouveau_drm *drm = chan->drm;
410 struct drm_nouveau_gem_pushbuf_bo __user *upbbo = 414 struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
411 (void __force __user *)(uintptr_t)user_pbbo_ptr; 415 (void __force __user *)(uintptr_t)user_pbbo_ptr;
412 struct drm_device *dev = chan->dev;
413 struct nouveau_bo *nvbo; 416 struct nouveau_bo *nvbo;
414 int ret, relocs = 0; 417 int ret, relocs = 0;
415 418
@@ -418,7 +421,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
418 421
419 ret = validate_sync(chan, nvbo); 422 ret = validate_sync(chan, nvbo);
420 if (unlikely(ret)) { 423 if (unlikely(ret)) {
421 NV_ERROR(dev, "fail pre-validate sync\n"); 424 NV_ERROR(drm, "fail pre-validate sync\n");
422 return ret; 425 return ret;
423 } 426 }
424 427
@@ -426,24 +429,24 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
426 b->write_domains, 429 b->write_domains,
427 b->valid_domains); 430 b->valid_domains);
428 if (unlikely(ret)) { 431 if (unlikely(ret)) {
429 NV_ERROR(dev, "fail set_domain\n"); 432 NV_ERROR(drm, "fail set_domain\n");
430 return ret; 433 return ret;
431 } 434 }
432 435
433 ret = nouveau_bo_validate(nvbo, true, false, false); 436 ret = nouveau_bo_validate(nvbo, true, false, false);
434 if (unlikely(ret)) { 437 if (unlikely(ret)) {
435 if (ret != -ERESTARTSYS) 438 if (ret != -ERESTARTSYS)
436 NV_ERROR(dev, "fail ttm_validate\n"); 439 NV_ERROR(drm, "fail ttm_validate\n");
437 return ret; 440 return ret;
438 } 441 }
439 442
440 ret = validate_sync(chan, nvbo); 443 ret = validate_sync(chan, nvbo);
441 if (unlikely(ret)) { 444 if (unlikely(ret)) {
442 NV_ERROR(dev, "fail post-validate sync\n"); 445 NV_ERROR(drm, "fail post-validate sync\n");
443 return ret; 446 return ret;
444 } 447 }
445 448
446 if (dev_priv->card_type < NV_50) { 449 if (nv_device(drm->device)->card_type < NV_50) {
447 if (nvbo->bo.offset == b->presumed.offset && 450 if (nvbo->bo.offset == b->presumed.offset &&
448 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && 451 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
449 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || 452 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -475,7 +478,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
475 uint64_t user_buffers, int nr_buffers, 478 uint64_t user_buffers, int nr_buffers,
476 struct validate_op *op, int *apply_relocs) 479 struct validate_op *op, int *apply_relocs)
477{ 480{
478 struct drm_device *dev = chan->dev; 481 struct nouveau_drm *drm = chan->drm;
479 int ret, relocs = 0; 482 int ret, relocs = 0;
480 483
481 INIT_LIST_HEAD(&op->vram_list); 484 INIT_LIST_HEAD(&op->vram_list);
@@ -488,14 +491,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
488 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); 491 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
489 if (unlikely(ret)) { 492 if (unlikely(ret)) {
490 if (ret != -ERESTARTSYS) 493 if (ret != -ERESTARTSYS)
491 NV_ERROR(dev, "validate_init\n"); 494 NV_ERROR(drm, "validate_init\n");
492 return ret; 495 return ret;
493 } 496 }
494 497
495 ret = validate_list(chan, &op->vram_list, pbbo, user_buffers); 498 ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
496 if (unlikely(ret < 0)) { 499 if (unlikely(ret < 0)) {
497 if (ret != -ERESTARTSYS) 500 if (ret != -ERESTARTSYS)
498 NV_ERROR(dev, "validate vram_list\n"); 501 NV_ERROR(drm, "validate vram_list\n");
499 validate_fini(op, NULL); 502 validate_fini(op, NULL);
500 return ret; 503 return ret;
501 } 504 }
@@ -504,7 +507,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
504 ret = validate_list(chan, &op->gart_list, pbbo, user_buffers); 507 ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
505 if (unlikely(ret < 0)) { 508 if (unlikely(ret < 0)) {
506 if (ret != -ERESTARTSYS) 509 if (ret != -ERESTARTSYS)
507 NV_ERROR(dev, "validate gart_list\n"); 510 NV_ERROR(drm, "validate gart_list\n");
508 validate_fini(op, NULL); 511 validate_fini(op, NULL);
509 return ret; 512 return ret;
510 } 513 }
@@ -513,7 +516,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
513 ret = validate_list(chan, &op->both_list, pbbo, user_buffers); 516 ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
514 if (unlikely(ret < 0)) { 517 if (unlikely(ret < 0)) {
515 if (ret != -ERESTARTSYS) 518 if (ret != -ERESTARTSYS)
516 NV_ERROR(dev, "validate both_list\n"); 519 NV_ERROR(drm, "validate both_list\n");
517 validate_fini(op, NULL); 520 validate_fini(op, NULL);
518 return ret; 521 return ret;
519 } 522 }
@@ -546,6 +549,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
546 struct drm_nouveau_gem_pushbuf *req, 549 struct drm_nouveau_gem_pushbuf *req,
547 struct drm_nouveau_gem_pushbuf_bo *bo) 550 struct drm_nouveau_gem_pushbuf_bo *bo)
548{ 551{
552 struct nouveau_drm *drm = nouveau_drm(dev);
549 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; 553 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
550 int ret = 0; 554 int ret = 0;
551 unsigned i; 555 unsigned i;
@@ -561,7 +565,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
561 uint32_t data; 565 uint32_t data;
562 566
563 if (unlikely(r->bo_index > req->nr_buffers)) { 567 if (unlikely(r->bo_index > req->nr_buffers)) {
564 NV_ERROR(dev, "reloc bo index invalid\n"); 568 NV_ERROR(drm, "reloc bo index invalid\n");
565 ret = -EINVAL; 569 ret = -EINVAL;
566 break; 570 break;
567 } 571 }
@@ -571,7 +575,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
571 continue; 575 continue;
572 576
573 if (unlikely(r->reloc_bo_index > req->nr_buffers)) { 577 if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
574 NV_ERROR(dev, "reloc container bo index invalid\n"); 578 NV_ERROR(drm, "reloc container bo index invalid\n");
575 ret = -EINVAL; 579 ret = -EINVAL;
576 break; 580 break;
577 } 581 }
@@ -579,7 +583,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
579 583
580 if (unlikely(r->reloc_bo_offset + 4 > 584 if (unlikely(r->reloc_bo_offset + 4 >
581 nvbo->bo.mem.num_pages << PAGE_SHIFT)) { 585 nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
582 NV_ERROR(dev, "reloc outside of bo\n"); 586 NV_ERROR(drm, "reloc outside of bo\n");
583 ret = -EINVAL; 587 ret = -EINVAL;
584 break; 588 break;
585 } 589 }
@@ -588,7 +592,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
588 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, 592 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
589 &nvbo->kmap); 593 &nvbo->kmap);
590 if (ret) { 594 if (ret) {
591 NV_ERROR(dev, "failed kmap for reloc\n"); 595 NV_ERROR(drm, "failed kmap for reloc\n");
592 break; 596 break;
593 } 597 }
594 nvbo->validate_mapped = true; 598 nvbo->validate_mapped = true;
@@ -613,7 +617,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
613 ret = ttm_bo_wait(&nvbo->bo, false, false, false); 617 ret = ttm_bo_wait(&nvbo->bo, false, false, false);
614 spin_unlock(&nvbo->bo.bdev->fence_lock); 618 spin_unlock(&nvbo->bo.bdev->fence_lock);
615 if (ret) { 619 if (ret) {
616 NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret); 620 NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret);
617 break; 621 break;
618 } 622 }
619 623
@@ -628,62 +632,67 @@ int
628nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, 632nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
629 struct drm_file *file_priv) 633 struct drm_file *file_priv)
630{ 634{
631 struct drm_nouveau_private *dev_priv = dev->dev_private; 635 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
636 struct nouveau_abi16_chan *temp;
637 struct nouveau_drm *drm = nouveau_drm(dev);
632 struct drm_nouveau_gem_pushbuf *req = data; 638 struct drm_nouveau_gem_pushbuf *req = data;
633 struct drm_nouveau_gem_pushbuf_push *push; 639 struct drm_nouveau_gem_pushbuf_push *push;
634 struct drm_nouveau_gem_pushbuf_bo *bo; 640 struct drm_nouveau_gem_pushbuf_bo *bo;
635 struct nouveau_channel *chan; 641 struct nouveau_channel *chan = NULL;
636 struct validate_op op; 642 struct validate_op op;
637 struct nouveau_fence *fence = NULL; 643 struct nouveau_fence *fence = NULL;
638 int i, j, ret = 0, do_reloc = 0; 644 int i, j, ret = 0, do_reloc = 0;
639 645
640 chan = nouveau_channel_get(file_priv, req->channel); 646 if (unlikely(!abi16))
641 if (IS_ERR(chan)) 647 return -ENOMEM;
642 return PTR_ERR(chan); 648
649 list_for_each_entry(temp, &abi16->channels, head) {
650 if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
651 chan = temp->chan;
652 break;
653 }
654 }
643 655
644 req->vram_available = dev_priv->fb_aper_free; 656 if (!chan)
645 req->gart_available = dev_priv->gart_info.aper_free; 657 return nouveau_abi16_put(abi16, -ENOENT);
658
659 req->vram_available = drm->gem.vram_available;
660 req->gart_available = drm->gem.gart_available;
646 if (unlikely(req->nr_push == 0)) 661 if (unlikely(req->nr_push == 0))
647 goto out_next; 662 goto out_next;
648 663
649 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { 664 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
650 NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n", 665 NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n",
651 req->nr_push, NOUVEAU_GEM_MAX_PUSH); 666 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
652 nouveau_channel_put(&chan); 667 return nouveau_abi16_put(abi16, -EINVAL);
653 return -EINVAL;
654 } 668 }
655 669
656 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { 670 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
657 NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n", 671 NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n",
658 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); 672 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
659 nouveau_channel_put(&chan); 673 return nouveau_abi16_put(abi16, -EINVAL);
660 return -EINVAL;
661 } 674 }
662 675
663 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { 676 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
664 NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n", 677 NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n",
665 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); 678 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
666 nouveau_channel_put(&chan); 679 return nouveau_abi16_put(abi16, -EINVAL);
667 return -EINVAL;
668 } 680 }
669 681
670 push = u_memcpya(req->push, req->nr_push, sizeof(*push)); 682 push = u_memcpya(req->push, req->nr_push, sizeof(*push));
671 if (IS_ERR(push)) { 683 if (IS_ERR(push))
672 nouveau_channel_put(&chan); 684 return nouveau_abi16_put(abi16, PTR_ERR(push));
673 return PTR_ERR(push);
674 }
675 685
676 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); 686 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
677 if (IS_ERR(bo)) { 687 if (IS_ERR(bo)) {
678 kfree(push); 688 kfree(push);
679 nouveau_channel_put(&chan); 689 return nouveau_abi16_put(abi16, PTR_ERR(bo));
680 return PTR_ERR(bo);
681 } 690 }
682 691
683 /* Ensure all push buffers are on validate list */ 692 /* Ensure all push buffers are on validate list */
684 for (i = 0; i < req->nr_push; i++) { 693 for (i = 0; i < req->nr_push; i++) {
685 if (push[i].bo_index >= req->nr_buffers) { 694 if (push[i].bo_index >= req->nr_buffers) {
686 NV_ERROR(dev, "push %d buffer not in list\n", i); 695 NV_ERROR(drm, "push %d buffer not in list\n", i);
687 ret = -EINVAL; 696 ret = -EINVAL;
688 goto out_prevalid; 697 goto out_prevalid;
689 } 698 }
@@ -694,7 +703,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
694 req->nr_buffers, &op, &do_reloc); 703 req->nr_buffers, &op, &do_reloc);
695 if (ret) { 704 if (ret) {
696 if (ret != -ERESTARTSYS) 705 if (ret != -ERESTARTSYS)
697 NV_ERROR(dev, "validate: %d\n", ret); 706 NV_ERROR(drm, "validate: %d\n", ret);
698 goto out_prevalid; 707 goto out_prevalid;
699 } 708 }
700 709
@@ -702,7 +711,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
702 if (do_reloc) { 711 if (do_reloc) {
703 ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo); 712 ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
704 if (ret) { 713 if (ret) {
705 NV_ERROR(dev, "reloc apply: %d\n", ret); 714 NV_ERROR(drm, "reloc apply: %d\n", ret);
706 goto out; 715 goto out;
707 } 716 }
708 } 717 }
@@ -710,7 +719,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
710 if (chan->dma.ib_max) { 719 if (chan->dma.ib_max) {
711 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16); 720 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
712 if (ret) { 721 if (ret) {
713 NV_INFO(dev, "nv50cal_space: %d\n", ret); 722 NV_ERROR(drm, "nv50cal_space: %d\n", ret);
714 goto out; 723 goto out;
715 } 724 }
716 725
@@ -722,36 +731,33 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
722 push[i].length); 731 push[i].length);
723 } 732 }
724 } else 733 } else
725 if (dev_priv->chipset >= 0x25) { 734 if (nv_device(drm->device)->chipset >= 0x25) {
726 ret = RING_SPACE(chan, req->nr_push * 2); 735 ret = RING_SPACE(chan, req->nr_push * 2);
727 if (ret) { 736 if (ret) {
728 NV_ERROR(dev, "cal_space: %d\n", ret); 737 NV_ERROR(drm, "cal_space: %d\n", ret);
729 goto out; 738 goto out;
730 } 739 }
731 740
732 for (i = 0; i < req->nr_push; i++) { 741 for (i = 0; i < req->nr_push; i++) {
733 struct nouveau_bo *nvbo = (void *)(unsigned long) 742 struct nouveau_bo *nvbo = (void *)(unsigned long)
734 bo[push[i].bo_index].user_priv; 743 bo[push[i].bo_index].user_priv;
735 struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
736 744
737 OUT_RING(chan, ((mem->start << PAGE_SHIFT) + 745 OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
738 push[i].offset) | 2);
739 OUT_RING(chan, 0); 746 OUT_RING(chan, 0);
740 } 747 }
741 } else { 748 } else {
742 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); 749 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
743 if (ret) { 750 if (ret) {
744 NV_ERROR(dev, "jmp_space: %d\n", ret); 751 NV_ERROR(drm, "jmp_space: %d\n", ret);
745 goto out; 752 goto out;
746 } 753 }
747 754
748 for (i = 0; i < req->nr_push; i++) { 755 for (i = 0; i < req->nr_push; i++) {
749 struct nouveau_bo *nvbo = (void *)(unsigned long) 756 struct nouveau_bo *nvbo = (void *)(unsigned long)
750 bo[push[i].bo_index].user_priv; 757 bo[push[i].bo_index].user_priv;
751 struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
752 uint32_t cmd; 758 uint32_t cmd;
753 759
754 cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2); 760 cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
755 cmd |= 0x20000000; 761 cmd |= 0x20000000;
756 if (unlikely(cmd != req->suffix0)) { 762 if (unlikely(cmd != req->suffix0)) {
757 if (!nvbo->kmap.virtual) { 763 if (!nvbo->kmap.virtual) {
@@ -770,8 +776,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
770 push[i].length - 8) / 4, cmd); 776 push[i].length - 8) / 4, cmd);
771 } 777 }
772 778
773 OUT_RING(chan, ((mem->start << PAGE_SHIFT) + 779 OUT_RING(chan, 0x20000000 |
774 push[i].offset) | 0x20000000); 780 (nvbo->bo.offset + push[i].offset));
775 OUT_RING(chan, 0); 781 OUT_RING(chan, 0);
776 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++) 782 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
777 OUT_RING(chan, 0); 783 OUT_RING(chan, 0);
@@ -780,7 +786,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
780 786
781 ret = nouveau_fence_new(chan, &fence); 787 ret = nouveau_fence_new(chan, &fence);
782 if (ret) { 788 if (ret) {
783 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); 789 NV_ERROR(drm, "error fencing pushbuf: %d\n", ret);
784 WIND_RING(chan); 790 WIND_RING(chan);
785 goto out; 791 goto out;
786 } 792 }
@@ -798,17 +804,16 @@ out_next:
798 req->suffix0 = 0x00000000; 804 req->suffix0 = 0x00000000;
799 req->suffix1 = 0x00000000; 805 req->suffix1 = 0x00000000;
800 } else 806 } else
801 if (dev_priv->chipset >= 0x25) { 807 if (nv_device(drm->device)->chipset >= 0x25) {
802 req->suffix0 = 0x00020000; 808 req->suffix0 = 0x00020000;
803 req->suffix1 = 0x00000000; 809 req->suffix1 = 0x00000000;
804 } else { 810 } else {
805 req->suffix0 = 0x20000000 | 811 req->suffix0 = 0x20000000 |
806 (chan->pushbuf_base + ((chan->dma.cur + 2) << 2)); 812 (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
807 req->suffix1 = 0x00000000; 813 req->suffix1 = 0x00000000;
808 } 814 }
809 815
810 nouveau_channel_put(&chan); 816 return nouveau_abi16_put(abi16, ret);
811 return ret;
812} 817}
813 818
814static inline uint32_t 819static inline uint32_t
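
The largest behavioural change in the pushbuf ioctl above is channel lookup: nouveau_channel_get()/put() disappear, the userspace channel id is resolved against the file's abi16 channel list, and every exit funnels through nouveau_abi16_put(). The lookup, pulled out into a standalone helper for illustration (the helper name is hypothetical; the matching logic mirrors the hunk above):

static struct nouveau_channel *
abi16_chan_lookup(struct nouveau_abi16 *abi16, int id)
{
	struct nouveau_abi16_chan *temp;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->handle == (NVDRM_CHAN | id))
			return temp->chan;
	}
	return NULL;	/* the ioctl then returns -ENOENT via abi16_put */
}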
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
new file mode 100644
index 000000000000..5c1049236d22
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -0,0 +1,43 @@
1#ifndef __NOUVEAU_GEM_H__
2#define __NOUVEAU_GEM_H__
3
4#include <drm/drmP.h>
5
6#include "nouveau_drm.h"
7#include "nouveau_bo.h"
8
9#define nouveau_bo_tile_layout(nvbo) \
10 ((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)
11
12static inline struct nouveau_bo *
13nouveau_gem_object(struct drm_gem_object *gem)
14{
15 return gem ? gem->driver_private : NULL;
16}
17
18/* nouveau_gem.c */
19extern int nouveau_gem_new(struct drm_device *, int size, int align,
20 uint32_t domain, uint32_t tile_mode,
21 uint32_t tile_flags, struct nouveau_bo **);
22extern int nouveau_gem_object_new(struct drm_gem_object *);
23extern void nouveau_gem_object_del(struct drm_gem_object *);
24extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
25extern void nouveau_gem_object_close(struct drm_gem_object *,
26 struct drm_file *);
27extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
28 struct drm_file *);
29extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
30 struct drm_file *);
31extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
32 struct drm_file *);
33extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
34 struct drm_file *);
35extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
36 struct drm_file *);
37
38extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
39 struct drm_gem_object *obj, int flags);
40extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
41 struct dma_buf *dma_buf);
42
43#endif
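
The new header collects the GEM entry points in one place rather than leaving them in the monolithic driver header. A typical in-kernel allocation sequence against this interface, following the fbcon path earlier in the diff (error handling abbreviated):

/* Allocate a VRAM-backed GEM buffer, pin it and map it for the CPU,
 * as the fbcon code above does for its scanout surface. */
struct nouveau_bo *nvbo = NULL;
int ret;

ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
		      0, 0x0000, &nvbo);
if (ret == 0)
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
if (ret == 0)
	ret = nouveau_bo_map(nvbo);	/* CPU mapping, e.g. for clearing */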
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
deleted file mode 100644
index ded74e555e5f..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_gpio.c
+++ /dev/null
@@ -1,400 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drv.h"
27#include "nouveau_i2c.h"
28#include "nouveau_gpio.h"
29
30static u8 *
31dcb_gpio_table(struct drm_device *dev)
32{
33 u8 *dcb = dcb_table(dev);
34 if (dcb) {
35 if (dcb[0] >= 0x30 && dcb[1] >= 0x0c)
36 return ROMPTR(dev, dcb[0x0a]);
37 if (dcb[0] >= 0x22 && dcb[-1] >= 0x13)
38 return ROMPTR(dev, dcb[-15]);
39 }
40 return NULL;
41}
42
43static u8 *
44dcb_gpio_entry(struct drm_device *dev, int idx, int ent, u8 *version)
45{
46 u8 *table = dcb_gpio_table(dev);
47 if (table) {
48 *version = table[0];
49 if (*version < 0x30 && ent < table[2])
50 return table + 3 + (ent * table[1]);
51 else if (ent < table[2])
52 return table + table[1] + (ent * table[3]);
53 }
54 return NULL;
55}
56
57int
58nouveau_gpio_drive(struct drm_device *dev, int idx, int line, int dir, int out)
59{
60 struct drm_nouveau_private *dev_priv = dev->dev_private;
61 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
62
63 return pgpio->drive ? pgpio->drive(dev, line, dir, out) : -ENODEV;
64}
65
66int
67nouveau_gpio_sense(struct drm_device *dev, int idx, int line)
68{
69 struct drm_nouveau_private *dev_priv = dev->dev_private;
70 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
71
72 return pgpio->sense ? pgpio->sense(dev, line) : -ENODEV;
73}
74
75int
76nouveau_gpio_find(struct drm_device *dev, int idx, u8 func, u8 line,
77 struct gpio_func *gpio)
78{
79 u8 *table, *entry, version;
80 int i = -1;
81
82 if (line == 0xff && func == 0xff)
83 return -EINVAL;
84
85 while ((entry = dcb_gpio_entry(dev, idx, ++i, &version))) {
86 if (version < 0x40) {
87 u16 data = ROM16(entry[0]);
88 *gpio = (struct gpio_func) {
89 .line = (data & 0x001f) >> 0,
90 .func = (data & 0x07e0) >> 5,
91 .log[0] = (data & 0x1800) >> 11,
92 .log[1] = (data & 0x6000) >> 13,
93 };
94 } else
95 if (version < 0x41) {
96 *gpio = (struct gpio_func) {
97 .line = entry[0] & 0x1f,
98 .func = entry[1],
99 .log[0] = (entry[3] & 0x18) >> 3,
100 .log[1] = (entry[3] & 0x60) >> 5,
101 };
102 } else {
103 *gpio = (struct gpio_func) {
104 .line = entry[0] & 0x3f,
105 .func = entry[1],
106 .log[0] = (entry[4] & 0x30) >> 4,
107 .log[1] = (entry[4] & 0xc0) >> 6,
108 };
109 }
110
111 if ((line == 0xff || line == gpio->line) &&
112 (func == 0xff || func == gpio->func))
113 return 0;
114 }
115
116 /* DCB 2.2, fixed TVDAC GPIO data */
117 if ((table = dcb_table(dev)) && table[0] >= 0x22) {
118 if (func == DCB_GPIO_TVDAC0) {
119 *gpio = (struct gpio_func) {
120 .func = DCB_GPIO_TVDAC0,
121 .line = table[-4] >> 4,
122 .log[0] = !!(table[-5] & 2),
123 .log[1] = !(table[-5] & 2),
124 };
125 return 0;
126 }
127 }
128
129 /* Apple iMac G4 NV18 */
130 if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
131 if (func == DCB_GPIO_TVDAC0) {
132 *gpio = (struct gpio_func) {
133 .func = DCB_GPIO_TVDAC0,
134 .line = 4,
135 .log[0] = 0,
136 .log[1] = 1,
137 };
138 return 0;
139 }
140 }
141
142 return -EINVAL;
143}
144
145int
146nouveau_gpio_set(struct drm_device *dev, int idx, u8 tag, u8 line, int state)
147{
148 struct gpio_func gpio;
149 int ret;
150
151 ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
152 if (ret == 0) {
153 int dir = !!(gpio.log[state] & 0x02);
154 int out = !!(gpio.log[state] & 0x01);
155 ret = nouveau_gpio_drive(dev, idx, gpio.line, dir, out);
156 }
157
158 return ret;
159}
160
161int
162nouveau_gpio_get(struct drm_device *dev, int idx, u8 tag, u8 line)
163{
164 struct gpio_func gpio;
165 int ret;
166
167 ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
168 if (ret == 0) {
169 ret = nouveau_gpio_sense(dev, idx, gpio.line);
170 if (ret >= 0)
171 ret = (ret == (gpio.log[1] & 1));
172 }
173
174 return ret;
175}
176
177int
178nouveau_gpio_irq(struct drm_device *dev, int idx, u8 tag, u8 line, bool on)
179{
180 struct drm_nouveau_private *dev_priv = dev->dev_private;
181 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
182 struct gpio_func gpio;
183 int ret;
184
185 ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
186 if (ret == 0) {
187 if (idx == 0 && pgpio->irq_enable)
188 pgpio->irq_enable(dev, gpio.line, on);
189 else
190 ret = -ENODEV;
191 }
192
193 return ret;
194}
195
196struct gpio_isr {
197 struct drm_device *dev;
198 struct list_head head;
199 struct work_struct work;
200 int idx;
201 struct gpio_func func;
202 void (*handler)(void *, int);
203 void *data;
204 bool inhibit;
205};
206
207static void
208nouveau_gpio_isr_bh(struct work_struct *work)
209{
210 struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
211 struct drm_device *dev = isr->dev;
212 struct drm_nouveau_private *dev_priv = dev->dev_private;
213 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
214 unsigned long flags;
215 int state;
216
217 state = nouveau_gpio_get(dev, isr->idx, isr->func.func, isr->func.line);
218 if (state >= 0)
219 isr->handler(isr->data, state);
220
221 spin_lock_irqsave(&pgpio->lock, flags);
222 isr->inhibit = false;
223 spin_unlock_irqrestore(&pgpio->lock, flags);
224}
225
226void
227nouveau_gpio_isr(struct drm_device *dev, int idx, u32 line_mask)
228{
229 struct drm_nouveau_private *dev_priv = dev->dev_private;
230 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
231 struct gpio_isr *isr;
232
233 if (idx != 0)
234 return;
235
236 spin_lock(&pgpio->lock);
237 list_for_each_entry(isr, &pgpio->isr, head) {
238 if (line_mask & (1 << isr->func.line)) {
239 if (isr->inhibit)
240 continue;
241 isr->inhibit = true;
242 schedule_work(&isr->work);
243 }
244 }
245 spin_unlock(&pgpio->lock);
246}
247
248int
249nouveau_gpio_isr_add(struct drm_device *dev, int idx, u8 tag, u8 line,
250 void (*handler)(void *, int), void *data)
251{
252 struct drm_nouveau_private *dev_priv = dev->dev_private;
253 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
254 struct gpio_isr *isr;
255 unsigned long flags;
256 int ret;
257
258 isr = kzalloc(sizeof(*isr), GFP_KERNEL);
259 if (!isr)
260 return -ENOMEM;
261
262 ret = nouveau_gpio_find(dev, idx, tag, line, &isr->func);
263 if (ret) {
264 kfree(isr);
265 return ret;
266 }
267
268 INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
269 isr->dev = dev;
270 isr->handler = handler;
271 isr->data = data;
272 isr->idx = idx;
273
274 spin_lock_irqsave(&pgpio->lock, flags);
275 list_add(&isr->head, &pgpio->isr);
276 spin_unlock_irqrestore(&pgpio->lock, flags);
277 return 0;
278}
279
280void
281nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
282 void (*handler)(void *, int), void *data)
283{
284 struct drm_nouveau_private *dev_priv = dev->dev_private;
285 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
286 struct gpio_isr *isr, *tmp;
287 struct gpio_func func;
288 unsigned long flags;
289 LIST_HEAD(tofree);
290 int ret;
291
292 ret = nouveau_gpio_find(dev, idx, tag, line, &func);
293 if (ret == 0) {
294 spin_lock_irqsave(&pgpio->lock, flags);
295 list_for_each_entry_safe(isr, tmp, &pgpio->isr, head) {
296 if (memcmp(&isr->func, &func, sizeof(func)) ||
297 isr->idx != idx ||
298 isr->handler != handler || isr->data != data)
299 continue;
300 list_move(&isr->head, &tofree);
301 }
302 spin_unlock_irqrestore(&pgpio->lock, flags);
303
304 list_for_each_entry_safe(isr, tmp, &tofree, head) {
305 flush_work(&isr->work);
306 kfree(isr);
307 }
308 }
309}
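
[editor's note] Worth noting in isr_del() above: flush_work() sleeps, and the work handler itself takes pgpio->lock, so flushing or freeing under the spinlock would deadlock. Matching entries are therefore unlinked onto a private tofree list while the lock is held, and only flushed and kfree()d after it is dropped; a queued work item cannot outlive the entry because flush_work() waits for the bottom half to finish before the kfree().
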
310
311int
312nouveau_gpio_create(struct drm_device *dev)
313{
314 struct drm_nouveau_private *dev_priv = dev->dev_private;
315 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
316
317 INIT_LIST_HEAD(&pgpio->isr);
318 spin_lock_init(&pgpio->lock);
319
320 return nouveau_gpio_init(dev);
321}
322
323void
324nouveau_gpio_destroy(struct drm_device *dev)
325{
326 struct drm_nouveau_private *dev_priv = dev->dev_private;
327 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
328
329 nouveau_gpio_fini(dev);
330 BUG_ON(!list_empty(&pgpio->isr));
331}
332
333int
334nouveau_gpio_init(struct drm_device *dev)
335{
336 struct drm_nouveau_private *dev_priv = dev->dev_private;
337 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
338 int ret = 0;
339
340 if (pgpio->init)
341 ret = pgpio->init(dev);
342
343 return ret;
344}
345
346void
347nouveau_gpio_fini(struct drm_device *dev)
348{
349 struct drm_nouveau_private *dev_priv = dev->dev_private;
350 struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
351
352 if (pgpio->fini)
353 pgpio->fini(dev);
354}
355
356void
357nouveau_gpio_reset(struct drm_device *dev)
358{
359 struct drm_nouveau_private *dev_priv = dev->dev_private;
360 u8 *entry, version;
361 int ent = -1;
362
363 while ((entry = dcb_gpio_entry(dev, 0, ++ent, &version))) {
364 u8 func = 0xff, line, defs, unk0, unk1;
365 if (version >= 0x41) {
366 defs = !!(entry[0] & 0x80);
367 line = entry[0] & 0x3f;
368 func = entry[1];
369 unk0 = entry[2];
370 unk1 = entry[3] & 0x1f;
371 } else
372 if (version >= 0x40) {
373 line = entry[0] & 0x1f;
374 func = entry[1];
375 defs = !!(entry[3] & 0x01);
376 unk0 = !!(entry[3] & 0x02);
377 unk1 = !!(entry[3] & 0x04);
378 } else {
379 break;
380 }
381
382 if (func == 0xff)
383 continue;
384
385 nouveau_gpio_func_set(dev, func, defs);
386
387 if (dev_priv->card_type >= NV_D0) {
388 nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
389 if (unk1--)
390 nv_mask(dev, 0x00d740 + (unk1 * 4), 0xff, line);
391 } else
392 if (dev_priv->card_type >= NV_50) {
393 static const u32 regs[] = { 0xe100, 0xe28c };
394 u32 val = (unk1 << 16) | unk0;
395 u32 reg = regs[line >> 4]; line &= 0x0f;
396
397 nv_mask(dev, reg, 0x00010001 << line, val << line);
398 }
399 }
400}
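
[editor's note] The reset loop above parses two DCB GPIO table layouts. To make the bitfields concrete with a hypothetical 4-byte entry { 0x84, 0x21, 0x05, 0x03 }: read as a version 4.1 entry it decodes to defs = 1 (bit 7 of byte 0), line = 4, func = 0x21, unk0 = 0x05, unk1 = 0x03; the same bytes read as a 4.0 entry would give line = 4 and func = 0x21 as well, but take defs/unk0/unk1 from bits 0-2 of byte 3 instead (here 1, 1, 0).
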
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.h b/drivers/gpu/drm/nouveau/nouveau_gpio.h
deleted file mode 100644
index 64c5cb077ace..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_gpio.h
+++ /dev/null
@@ -1,71 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __NOUVEAU_GPIO_H__
24#define __NOUVEAU_GPIO_H__
25
26struct gpio_func {
27 u8 func;
28 u8 line;
29 u8 log[2];
30};
31
32/* nouveau_gpio.c */
33int nouveau_gpio_create(struct drm_device *);
34void nouveau_gpio_destroy(struct drm_device *);
35int nouveau_gpio_init(struct drm_device *);
36void nouveau_gpio_fini(struct drm_device *);
37void nouveau_gpio_reset(struct drm_device *);
38int nouveau_gpio_drive(struct drm_device *, int idx, int line,
39 int dir, int out);
40int nouveau_gpio_sense(struct drm_device *, int idx, int line);
41int nouveau_gpio_find(struct drm_device *, int idx, u8 tag, u8 line,
42 struct gpio_func *);
43int nouveau_gpio_set(struct drm_device *, int idx, u8 tag, u8 line, int state);
44int nouveau_gpio_get(struct drm_device *, int idx, u8 tag, u8 line);
45int nouveau_gpio_irq(struct drm_device *, int idx, u8 tag, u8 line, bool on);
46void nouveau_gpio_isr(struct drm_device *, int idx, u32 mask);
47int nouveau_gpio_isr_add(struct drm_device *, int idx, u8 tag, u8 line,
48 void (*)(void *, int state), void *data);
49void nouveau_gpio_isr_del(struct drm_device *, int idx, u8 tag, u8 line,
50 void (*)(void *, int state), void *data);
51
52static inline bool
53nouveau_gpio_func_valid(struct drm_device *dev, u8 tag)
54{
55 struct gpio_func func;
56 return (nouveau_gpio_find(dev, 0, tag, 0xff, &func)) == 0;
57}
58
59static inline int
60nouveau_gpio_func_set(struct drm_device *dev, u8 tag, int state)
61{
62 return nouveau_gpio_set(dev, 0, tag, 0xff, state);
63}
64
65static inline int
66nouveau_gpio_func_get(struct drm_device *dev, u8 tag)
67{
68 return nouveau_gpio_get(dev, 0, tag, 0xff);
69}
70
71#endif
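
[editor's note] For orientation: consumers addressed GPIOs through the tag-based inline helpers above rather than by physical line. A hypothetical call sequence (the DCB_GPIO_PANEL_POWER tag is assumed to be defined alongside the other DCB tags; the check itself is illustrative, not from the driver):

if (nouveau_gpio_func_valid(dev, DCB_GPIO_PANEL_POWER)) {
	nouveau_gpio_func_set(dev, DCB_GPIO_PANEL_POWER, 1);
	if (nouveau_gpio_func_get(dev, DCB_GPIO_PANEL_POWER) != 1)
		NV_WARN(dev, "panel power did not latch\n");
}
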
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
deleted file mode 100644
index 1af7a39e0350..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
+++ /dev/null
@@ -1,807 +0,0 @@
1/*
2 * Copyright (C) 2006 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28/*
29 * Authors:
30 * Ben Skeggs <darktama@iinet.net.au>
31 */
32
33#include <drm/drmP.h>
34#include "nouveau_drv.h"
35#include <drm/nouveau_drm.h>
36#include "nouveau_fifo.h"
37#include "nouveau_ramht.h"
38#include "nouveau_software.h"
39#include "nouveau_vm.h"
40
41struct nouveau_gpuobj_method {
42 struct list_head head;
43 u32 mthd;
44 int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
45};
46
47struct nouveau_gpuobj_class {
48 struct list_head head;
49 struct list_head methods;
50 u32 id;
51 u32 engine;
52};
53
54int
55nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
56{
57 struct drm_nouveau_private *dev_priv = dev->dev_private;
58 struct nouveau_gpuobj_class *oc;
59
60 oc = kzalloc(sizeof(*oc), GFP_KERNEL);
61 if (!oc)
62 return -ENOMEM;
63
64 INIT_LIST_HEAD(&oc->methods);
65 oc->id = class;
66 oc->engine = engine;
67 list_add(&oc->head, &dev_priv->classes);
68 return 0;
69}
70
71int
72nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
73 int (*exec)(struct nouveau_channel *, u32, u32, u32))
74{
75 struct drm_nouveau_private *dev_priv = dev->dev_private;
76 struct nouveau_gpuobj_method *om;
77 struct nouveau_gpuobj_class *oc;
78
79 list_for_each_entry(oc, &dev_priv->classes, head) {
80 if (oc->id == class)
81 goto found;
82 }
83
84 return -EINVAL;
85
86found:
87 om = kzalloc(sizeof(*om), GFP_KERNEL);
88 if (!om)
89 return -ENOMEM;
90
91 om->mthd = mthd;
92 om->exec = exec;
93 list_add(&om->head, &oc->methods);
94 return 0;
95}
96
97int
98nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
99 u32 class, u32 mthd, u32 data)
100{
101 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
102 struct nouveau_gpuobj_method *om;
103 struct nouveau_gpuobj_class *oc;
104
105 list_for_each_entry(oc, &dev_priv->classes, head) {
106 if (oc->id != class)
107 continue;
108
109 list_for_each_entry(om, &oc->methods, head) {
110 if (om->mthd == mthd)
111 return om->exec(chan, class, mthd, data);
112 }
113 }
114
115 return -ENOENT;
116}
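
[editor's note] Putting the two registration entry points above together: an engine registers its class once at load time, attaches software methods to it, and the FIFO interrupt path later dispatches by (class, mthd) through mthd_call()/mthd_call2(). A sketch of such a registration; the class and method ids here are illustrative only:

static int
example_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
	/* consume the method */
	return 0;
}

static int
example_register(struct drm_device *dev)
{
	int ret = nouveau_gpuobj_class_new(dev, 0x506e, NVOBJ_ENGINE_SW);
	if (ret == 0)
		ret = nouveau_gpuobj_mthd_new(dev, 0x506e, 0x0500, example_mthd);
	return ret;
}
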
117
118int
119nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
120 u32 class, u32 mthd, u32 data)
121{
122 struct drm_nouveau_private *dev_priv = dev->dev_private;
123 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
124 struct nouveau_channel *chan = NULL;
125 unsigned long flags;
126 int ret = -EINVAL;
127
128 spin_lock_irqsave(&dev_priv->channels.lock, flags);
129 if (chid >= 0 && chid < pfifo->channels)
130 chan = dev_priv->channels.ptr[chid];
131 if (chan)
132 ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
133 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
134 return ret;
135}
136
137int
138nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
139 uint32_t size, int align, uint32_t flags,
140 struct nouveau_gpuobj **gpuobj_ret)
141{
142 struct drm_nouveau_private *dev_priv = dev->dev_private;
143 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
144 struct nouveau_gpuobj *gpuobj;
145 struct drm_mm_node *ramin = NULL;
146 int ret, i;
147
148 NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
149 chan ? chan->id : -1, size, align, flags);
150
151 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
152 if (!gpuobj)
153 return -ENOMEM;
154 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
155 gpuobj->dev = dev;
156 gpuobj->flags = flags;
157 kref_init(&gpuobj->refcount);
158 gpuobj->size = size;
159
160 spin_lock(&dev_priv->ramin_lock);
161 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
162 spin_unlock(&dev_priv->ramin_lock);
163
164 if (!(flags & NVOBJ_FLAG_VM) && chan) {
165 ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
166 if (ramin)
167 ramin = drm_mm_get_block(ramin, size, align);
168 if (!ramin) {
169 nouveau_gpuobj_ref(NULL, &gpuobj);
170 return -ENOMEM;
171 }
172
173 gpuobj->pinst = chan->ramin->pinst;
174 if (gpuobj->pinst != ~0)
175 gpuobj->pinst += ramin->start;
176
177 gpuobj->cinst = ramin->start;
178 gpuobj->vinst = ramin->start + chan->ramin->vinst;
179 gpuobj->node = ramin;
180 } else {
181 ret = instmem->get(gpuobj, chan, size, align);
182 if (ret) {
183 nouveau_gpuobj_ref(NULL, &gpuobj);
184 return ret;
185 }
186
187 ret = -ENOSYS;
188 if (!(flags & NVOBJ_FLAG_DONT_MAP))
189 ret = instmem->map(gpuobj);
190 if (ret)
191 gpuobj->pinst = ~0;
192
193 gpuobj->cinst = NVOBJ_CINST_GLOBAL;
194 }
195
196 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
197 for (i = 0; i < gpuobj->size; i += 4)
198 nv_wo32(gpuobj, i, 0);
199 instmem->flush(dev);
200 }
201
202
203 *gpuobj_ret = gpuobj;
204 return 0;
205}
206
207int
208nouveau_gpuobj_init(struct drm_device *dev)
209{
210 struct drm_nouveau_private *dev_priv = dev->dev_private;
211
212 NV_DEBUG(dev, "\n");
213
214 INIT_LIST_HEAD(&dev_priv->gpuobj_list);
215 INIT_LIST_HEAD(&dev_priv->classes);
216 spin_lock_init(&dev_priv->ramin_lock);
217 dev_priv->ramin_base = ~0;
218
219 return 0;
220}
221
222void
223nouveau_gpuobj_takedown(struct drm_device *dev)
224{
225 struct drm_nouveau_private *dev_priv = dev->dev_private;
226 struct nouveau_gpuobj_method *om, *tm;
227 struct nouveau_gpuobj_class *oc, *tc;
228
229 NV_DEBUG(dev, "\n");
230
231 list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
232 list_for_each_entry_safe(om, tm, &oc->methods, head) {
233 list_del(&om->head);
234 kfree(om);
235 }
236 list_del(&oc->head);
237 kfree(oc);
238 }
239
240 WARN_ON(!list_empty(&dev_priv->gpuobj_list));
241}
242
243
244static void
245nouveau_gpuobj_del(struct kref *ref)
246{
247 struct nouveau_gpuobj *gpuobj =
248 container_of(ref, struct nouveau_gpuobj, refcount);
249 struct drm_device *dev = gpuobj->dev;
250 struct drm_nouveau_private *dev_priv = dev->dev_private;
251 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
252 int i;
253
254 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
255
256 if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
257 for (i = 0; i < gpuobj->size; i += 4)
258 nv_wo32(gpuobj, i, 0);
259 instmem->flush(dev);
260 }
261
262 if (gpuobj->dtor)
263 gpuobj->dtor(dev, gpuobj);
264
265 if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
266 if (gpuobj->node) {
267 instmem->unmap(gpuobj);
268 instmem->put(gpuobj);
269 }
270 } else {
271 if (gpuobj->node) {
272 spin_lock(&dev_priv->ramin_lock);
273 drm_mm_put_block(gpuobj->node);
274 spin_unlock(&dev_priv->ramin_lock);
275 }
276 }
277
278 spin_lock(&dev_priv->ramin_lock);
279 list_del(&gpuobj->list);
280 spin_unlock(&dev_priv->ramin_lock);
281
282 kfree(gpuobj);
283}
284
285void
286nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
287{
288 if (ref)
289 kref_get(&ref->refcount);
290
291 if (*ptr)
292 kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);
293
294 *ptr = ref;
295}
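
[editor's note] nouveau_gpuobj_ref() doubles as acquire, release and replace: the new reference is taken before the old one is dropped, so the call is safe even when both arguments name the same object. The three shapes seen throughout this file (some_obj/other_obj stand for any held objects):

struct nouveau_gpuobj *obj = NULL;

nouveau_gpuobj_ref(some_obj, &obj);	/* acquire: obj = some_obj, +1 ref */
nouveau_gpuobj_ref(other_obj, &obj);	/* replace: +1 other, then -1 some */
nouveau_gpuobj_ref(NULL, &obj);		/* release: -1 ref, obj = NULL */
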
296
297int
298nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
299 u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
300{
301 struct drm_nouveau_private *dev_priv = dev->dev_private;
302 struct nouveau_gpuobj *gpuobj = NULL;
303 int i;
304
305 NV_DEBUG(dev,
306 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
307 pinst, vinst, size, flags);
308
309 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
310 if (!gpuobj)
311 return -ENOMEM;
312 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
313 gpuobj->dev = dev;
314 gpuobj->flags = flags;
315 kref_init(&gpuobj->refcount);
316 gpuobj->size = size;
317 gpuobj->pinst = pinst;
318 gpuobj->cinst = NVOBJ_CINST_GLOBAL;
319 gpuobj->vinst = vinst;
320
321 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
322 for (i = 0; i < gpuobj->size; i += 4)
323 nv_wo32(gpuobj, i, 0);
324 dev_priv->engine.instmem.flush(dev);
325 }
326
327 spin_lock(&dev_priv->ramin_lock);
328 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
329 spin_unlock(&dev_priv->ramin_lock);
330 *pgpuobj = gpuobj;
331 return 0;
332}
333
334void
335nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
336 u64 base, u64 size, int target, int access,
337 u32 type, u32 comp)
338{
339 struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
340 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
341 u32 flags0;
342
343 flags0 = (comp << 29) | (type << 22) | class;
344 flags0 |= 0x00100000;
345
346 switch (access) {
347 case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
348 case NV_MEM_ACCESS_RW:
349 case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
350 default:
351 break;
352 }
353
354 switch (target) {
355 case NV_MEM_TARGET_VRAM:
356 flags0 |= 0x00010000;
357 break;
358 case NV_MEM_TARGET_PCI:
359 flags0 |= 0x00020000;
360 break;
361 case NV_MEM_TARGET_PCI_NOSNOOP:
362 flags0 |= 0x00030000;
363 break;
364 case NV_MEM_TARGET_GART:
365 base += dev_priv->gart_info.aper_base; /* fall through */
366 default:
367 flags0 &= ~0x00100000;
368 break;
369 }
370
371 /* convert to base + limit */
372 size = (base + size) - 1;
373
374 nv_wo32(obj, offset + 0x00, flags0);
375 nv_wo32(obj, offset + 0x04, lower_32_bits(size));
376 nv_wo32(obj, offset + 0x08, lower_32_bits(base));
377 nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
378 upper_32_bits(base));
379 nv_wo32(obj, offset + 0x10, 0x00000000);
380 nv_wo32(obj, offset + 0x14, 0x00000000);
381
382 pinstmem->flush(obj->dev);
383}
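
[editor's note] To make the packing above concrete (values hypothetical): a class 0x003d object covering base 0x1000, size 0x2000 is first converted to the inclusive limit 0x2fff, producing word 0x04 = 0x00002fff and word 0x08 = 0x00001000, with the upper bits of limit and base packed together into word 0x0c (both zero here) and words 0x10/0x14 cleared.
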
384
385int
386nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
387 int target, int access, u32 type, u32 comp,
388 struct nouveau_gpuobj **pobj)
389{
390 struct drm_device *dev = chan->dev;
391 int ret;
392
393 ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
394 if (ret)
395 return ret;
396
397 nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
398 access, type, comp);
399 return 0;
400}
401
402int
403nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
404 u64 size, int access, int target,
405 struct nouveau_gpuobj **pobj)
406{
407 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
408 struct drm_device *dev = chan->dev;
409 struct nouveau_gpuobj *obj;
410 u32 flags0, flags2;
411 int ret;
412
413 if (dev_priv->card_type >= NV_50) {
414 u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
415 u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
416
417 return nv50_gpuobj_dma_new(chan, class, base, size,
418 target, access, type, comp, pobj);
419 }
420
421 if (target == NV_MEM_TARGET_GART) {
422 struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
423
424 if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
425 if (base == 0) {
426 nouveau_gpuobj_ref(gart, pobj);
427 return 0;
428 }
429
430 base = nouveau_sgdma_get_physical(dev, base);
431 target = NV_MEM_TARGET_PCI;
432 } else {
433 base += dev_priv->gart_info.aper_base;
434 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
435 target = NV_MEM_TARGET_PCI_NOSNOOP;
436 else
437 target = NV_MEM_TARGET_PCI;
438 }
439 }
440
441 flags0 = class;
442 flags0 |= 0x00003000; /* PT present, PT linear */
443 flags2 = 0;
444
445 switch (target) {
446 case NV_MEM_TARGET_PCI:
447 flags0 |= 0x00020000;
448 break;
449 case NV_MEM_TARGET_PCI_NOSNOOP:
450 flags0 |= 0x00030000;
451 break;
452 default:
453 break;
454 }
455
456 switch (access) {
457 case NV_MEM_ACCESS_RO:
458 flags0 |= 0x00004000;
459 break;
460 case NV_MEM_ACCESS_WO:
461 flags0 |= 0x00008000; /* fall through */
462 default:
463 flags2 |= 0x00000002;
464 break;
465 }
466
467 flags0 |= (base & 0x00000fff) << 20;
468 flags2 |= (base & 0xfffff000);
469
470 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
471 if (ret)
472 return ret;
473
474 nv_wo32(obj, 0x00, flags0);
475 nv_wo32(obj, 0x04, size - 1);
476 nv_wo32(obj, 0x08, flags2);
477 nv_wo32(obj, 0x0c, flags2);
478
479 obj->engine = NVOBJ_ENGINE_SW;
480 obj->class = class;
481 *pobj = obj;
482 return 0;
483}
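
[editor's note] On the pre-NV50 path the base address is split across the object: the sub-page bits land in flags0[31:20] and the page-aligned part in flags2, so a hypothetical base of 0x12345678 contributes 0x678 << 20 to flags0 and 0x12345000 to flags2, while word 0x04 carries size - 1 as an inclusive limit and flags2 is mirrored at offset 0x0c.
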
484
485int
486nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
487{
488 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
489 struct drm_device *dev = chan->dev;
490 struct nouveau_gpuobj_class *oc;
491 int ret;
492
493 NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
494
495 list_for_each_entry(oc, &dev_priv->classes, head) {
496 struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];
497
498 if (oc->id != class)
499 continue;
500
501 if (!chan->engctx[oc->engine]) {
502 ret = eng->context_new(chan, oc->engine);
503 if (ret)
504 return ret;
505 }
506
507 return eng->object_new(chan, oc->engine, handle, class);
508 }
509
510 return -EINVAL;
511}
512
513static int
514nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
515{
516 struct drm_device *dev = chan->dev;
517 struct drm_nouveau_private *dev_priv = dev->dev_private;
518 uint32_t size;
519 uint32_t base;
520 int ret;
521
522 NV_DEBUG(dev, "ch%d\n", chan->id);
523
524 /* Base amount for object storage (4KiB enough?) */
525 size = 0x2000;
526 base = 0;
527
528 if (dev_priv->card_type == NV_50) {
529 /* Various fixed table thingos */
530 size += 0x1400; /* mostly unknown stuff */
531 size += 0x4000; /* vm pd */
532 base = 0x6000;
533 /* RAMHT, not sure about setting size yet, 32KiB to be safe */
534 size += 0x8000;
535 /* RAMFC */
536 size += 0x1000;
537 }
538
539 ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
540 if (ret) {
541 NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
542 return ret;
543 }
544
545 ret = drm_mm_init(&chan->ramin_heap, base, size - base);
546 if (ret) {
547 NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
548 nouveau_gpuobj_ref(NULL, &chan->ramin);
549 return ret;
550 }
551
552 return 0;
553}
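
[editor's note] For an NV50 channel the sizing above totals 0x2000 + 0x1400 + 0x4000 + 0x8000 + 0x1000 = 0x10400 bytes, with the region below base = 0x6000 holding the fixed tables and page directory, and the drm_mm heap managing the remaining 0xa400 bytes for object storage.
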
554
555static int
556nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
557{
558 struct drm_device *dev = chan->dev;
559 struct nouveau_gpuobj *pgd = NULL;
560 struct nouveau_vm_pgd *vpgd;
561 int ret;
562
563 ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
564 if (ret)
565 return ret;
566
567 /* create page directory for this vm if none currently exists,
568 * it will be destroyed automagically when the last reference to the
569 * vm is removed
570 */
571 if (list_empty(&vm->pgd_list)) {
572 ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
573 if (ret)
574 return ret;
575 }
576 nouveau_vm_ref(vm, &chan->vm, pgd);
577 nouveau_gpuobj_ref(NULL, &pgd);
578
579 /* point channel at vm's page directory */
580 vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
581 nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
582 nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
583 nv_wo32(chan->ramin, 0x0208, 0xffffffff);
584 nv_wo32(chan->ramin, 0x020c, 0x000000ff);
585
586 return 0;
587}
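
[editor's note] The four words written at 0x200 above give the Fermi channel its address space: the low and high halves of the page directory address, then a 40-bit VM limit expressed as 0xff_ffffffff split across 0x208/0x20c, i.e. a 1 TiB virtual space minus one byte.
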
588
589int
590nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
591 uint32_t vram_h, uint32_t tt_h)
592{
593 struct drm_device *dev = chan->dev;
594 struct drm_nouveau_private *dev_priv = dev->dev_private;
595 struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
596 struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
597 struct nouveau_gpuobj *vram = NULL, *tt = NULL;
598 int ret;
599
600 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
601 if (dev_priv->card_type >= NV_C0)
602 return nvc0_gpuobj_channel_init(chan, vm);
603
604 /* Allocate a chunk of memory for per-channel object storage */
605 ret = nouveau_gpuobj_channel_init_pramin(chan);
606 if (ret) {
607 NV_ERROR(dev, "init pramin\n");
608 return ret;
609 }
610
611 /* NV50 VM
612 * - Allocate per-channel page-directory
613 * - Link with shared channel VM
614 */
615 if (vm) {
616 u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
617 u64 vm_vinst = chan->ramin->vinst + pgd_offs;
618 u32 vm_pinst = chan->ramin->pinst;
619
620 if (vm_pinst != ~0)
621 vm_pinst += pgd_offs;
622
623 ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
624 0, &chan->vm_pd);
625 if (ret)
626 return ret;
627
628 nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
629 }
630
631 /* RAMHT */
632 if (dev_priv->card_type < NV_50) {
633 nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
634 } else {
635 struct nouveau_gpuobj *ramht = NULL;
636
637 ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
638 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
639 if (ret)
640 return ret;
641
642 ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
643 nouveau_gpuobj_ref(NULL, &ramht);
644 if (ret)
645 return ret;
646 }
647
648 /* VRAM ctxdma */
649 if (dev_priv->card_type >= NV_50) {
650 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
651 0, (1ULL << 40), NV_MEM_ACCESS_RW,
652 NV_MEM_TARGET_VM, &vram);
653 if (ret) {
654 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
655 return ret;
656 }
657 } else {
658 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
659 0, dev_priv->fb_available_size,
660 NV_MEM_ACCESS_RW,
661 NV_MEM_TARGET_VRAM, &vram);
662 if (ret) {
663 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
664 return ret;
665 }
666 }
667
668 ret = nouveau_ramht_insert(chan, vram_h, vram);
669 nouveau_gpuobj_ref(NULL, &vram);
670 if (ret) {
671 NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
672 return ret;
673 }
674
675 /* TT memory ctxdma */
676 if (dev_priv->card_type >= NV_50) {
677 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
678 0, (1ULL << 40), NV_MEM_ACCESS_RW,
679 NV_MEM_TARGET_VM, &tt);
680 } else {
681 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
682 0, dev_priv->gart_info.aper_size,
683 NV_MEM_ACCESS_RW,
684 NV_MEM_TARGET_GART, &tt);
685 }
686
687 if (ret) {
688 NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
689 return ret;
690 }
691
692 ret = nouveau_ramht_insert(chan, tt_h, tt);
693 nouveau_gpuobj_ref(NULL, &tt);
694 if (ret) {
695 NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
696 return ret;
697 }
698
699 return 0;
700}
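
[editor's note] Summarizing the channel bring-up above: a PRAMIN suballocation heap, then (with a VM) a fake gpuobj wrapping the page-directory slice of RAMIN at offset 0x1400 or 0x0200 depending on chipset, a RAMHT (shared below NV50, a private 32 KiB one otherwise), and finally VRAM and TT ctxdmas inserted into the hash table under the caller-supplied handles vram_h/tt_h.
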
701
702void
703nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
704{
705 NV_DEBUG(chan->dev, "ch%d\n", chan->id);
706
707 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
708 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
709
710 if (drm_mm_initialized(&chan->ramin_heap))
711 drm_mm_takedown(&chan->ramin_heap);
712 nouveau_gpuobj_ref(NULL, &chan->ramin);
713}
714
715int
716nouveau_gpuobj_suspend(struct drm_device *dev)
717{
718 struct drm_nouveau_private *dev_priv = dev->dev_private;
719 struct nouveau_gpuobj *gpuobj;
720 int i;
721
722 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
723 if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
724 continue;
725
726 gpuobj->suspend = vmalloc(gpuobj->size);
727 if (!gpuobj->suspend) {
728 nouveau_gpuobj_resume(dev);
729 return -ENOMEM;
730 }
731
732 for (i = 0; i < gpuobj->size; i += 4)
733 gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
734 }
735
736 return 0;
737}
738
739void
740nouveau_gpuobj_resume(struct drm_device *dev)
741{
742 struct drm_nouveau_private *dev_priv = dev->dev_private;
743 struct nouveau_gpuobj *gpuobj;
744 int i;
745
746 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
747 if (!gpuobj->suspend)
748 continue;
749
750 for (i = 0; i < gpuobj->size; i += 4)
751 nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);
752
753 vfree(gpuobj->suspend);
754 gpuobj->suspend = NULL;
755 }
756
757 dev_priv->engine.instmem.flush(dev);
758}
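
[editor's note] A note on the snapshot strategy above: the contents are staged with vmalloc() rather than kmalloc(), presumably because instance objects can run to tens of kilobytes and need no physical contiguity, and a failed allocation mid-walk is unwound by simply calling resume, which restores and frees whatever had already been saved.
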
759
760u32
761nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
762{
763 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
764 struct drm_device *dev = gpuobj->dev;
765 unsigned long flags;
766
767 if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
768 u64 ptr = gpuobj->vinst + offset;
769 u32 base = ptr >> 16;
770 u32 val;
771
772 spin_lock_irqsave(&dev_priv->vm_lock, flags);
773 if (dev_priv->ramin_base != base) {
774 dev_priv->ramin_base = base;
775 nv_wr32(dev, 0x001700, dev_priv->ramin_base);
776 }
777 val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
778 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
779 return val;
780 }
781
782 return nv_ri32(dev, gpuobj->pinst + offset);
783}
784
785void
786nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
787{
788 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
789 struct drm_device *dev = gpuobj->dev;
790 unsigned long flags;
791
792 if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
793 u64 ptr = gpuobj->vinst + offset;
794 u32 base = ptr >> 16;
795
796 spin_lock_irqsave(&dev_priv->vm_lock, flags);
797 if (dev_priv->ramin_base != base) {
798 dev_priv->ramin_base = base;
799 nv_wr32(dev, 0x001700, dev_priv->ramin_base);
800 }
801 nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
802 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
803 return;
804 }
805
806 nv_wi32(dev, gpuobj->pinst + offset, val);
807}
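
[editor's note] The fallback path in nv_ro32()/nv_wo32() above reaches instance memory through a sliding 64 KiB window: writing vinst >> 16 to register 0x001700 selects the window base, the low 16 bits of the address index into BAR0 at 0x700000, and dev_priv->ramin_base caches the last base to skip redundant window moves. For example (address hypothetical), ptr = 0x123456 repositions the window to base 0x12 and accesses 0x700000 + 0x3456.
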
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
index 1e942cfb9644..2c672cebc889 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
@@ -23,7 +23,7 @@
23 */ 23 */
24 24
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include "nouveau_drv.h" 26#include "nouveau_drm.h"
27#include "nouveau_connector.h" 27#include "nouveau_connector.h"
28#include "nouveau_encoder.h" 28#include "nouveau_encoder.h"
29#include "nouveau_crtc.h" 29#include "nouveau_crtc.h"
@@ -31,10 +31,10 @@
31static bool 31static bool
32hdmi_sor(struct drm_encoder *encoder) 32hdmi_sor(struct drm_encoder *encoder)
33{ 33{
34 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; 34 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
35 if (dev_priv->chipset < 0xa3 || 35 if (nv_device(drm->device)->chipset < 0xa3 ||
36 dev_priv->chipset == 0xaa || 36 nv_device(drm->device)->chipset == 0xaa ||
37 dev_priv->chipset == 0xac) 37 nv_device(drm->device)->chipset == 0xac)
38 return false; 38 return false;
39 return true; 39 return true;
40} 40}
@@ -52,13 +52,15 @@ hdmi_base(struct drm_encoder *encoder)
52static void 52static void
53hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val) 53hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
54{ 54{
55 nv_wr32(encoder->dev, hdmi_base(encoder) + reg, val); 55 struct nouveau_device *device = nouveau_dev(encoder->dev);
56 nv_wr32(device, hdmi_base(encoder) + reg, val);
56} 57}
57 58
58static u32 59static u32
59hdmi_rd32(struct drm_encoder *encoder, u32 reg) 60hdmi_rd32(struct drm_encoder *encoder, u32 reg)
60{ 61{
61 return nv_rd32(encoder->dev, hdmi_base(encoder) + reg); 62 struct nouveau_device *device = nouveau_dev(encoder->dev);
63 return nv_rd32(device, hdmi_base(encoder) + reg);
62} 64}
63 65
64static u32 66static u32
@@ -73,12 +75,11 @@ static void
73nouveau_audio_disconnect(struct drm_encoder *encoder) 75nouveau_audio_disconnect(struct drm_encoder *encoder)
74{ 76{
75 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 77 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
76 struct drm_device *dev = encoder->dev; 78 struct nouveau_device *device = nouveau_dev(encoder->dev);
77 u32 or = nv_encoder->or * 0x800; 79 u32 or = nv_encoder->or * 0x800;
78 80
79 if (hdmi_sor(encoder)) { 81 if (hdmi_sor(encoder))
80 nv_mask(dev, 0x61c448 + or, 0x00000003, 0x00000000); 82 nv_mask(device, 0x61c448 + or, 0x00000003, 0x00000000);
81 }
82} 83}
83 84
84static void 85static void
@@ -86,8 +87,8 @@ nouveau_audio_mode_set(struct drm_encoder *encoder,
86 struct drm_display_mode *mode) 87 struct drm_display_mode *mode)
87{ 88{
88 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 89 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
90 struct nouveau_device *device = nouveau_dev(encoder->dev);
89 struct nouveau_connector *nv_connector; 91 struct nouveau_connector *nv_connector;
90 struct drm_device *dev = encoder->dev;
91 u32 or = nv_encoder->or * 0x800; 92 u32 or = nv_encoder->or * 0x800;
92 int i; 93 int i;
93 94
@@ -98,16 +99,16 @@ nouveau_audio_mode_set(struct drm_encoder *encoder,
98 } 99 }
99 100
100 if (hdmi_sor(encoder)) { 101 if (hdmi_sor(encoder)) {
101 nv_mask(dev, 0x61c448 + or, 0x00000001, 0x00000001); 102 nv_mask(device, 0x61c448 + or, 0x00000001, 0x00000001);
102 103
103 drm_edid_to_eld(&nv_connector->base, nv_connector->edid); 104 drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
104 if (nv_connector->base.eld[0]) { 105 if (nv_connector->base.eld[0]) {
105 u8 *eld = nv_connector->base.eld; 106 u8 *eld = nv_connector->base.eld;
106 for (i = 0; i < eld[2] * 4; i++) 107 for (i = 0; i < eld[2] * 4; i++)
107 nv_wr32(dev, 0x61c440 + or, (i << 8) | eld[i]); 108 nv_wr32(device, 0x61c440 + or, (i << 8) | eld[i]);
108 for (i = eld[2] * 4; i < 0x60; i++) 109 for (i = eld[2] * 4; i < 0x60; i++)
109 nv_wr32(dev, 0x61c440 + or, (i << 8) | 0x00); 110 nv_wr32(device, 0x61c440 + or, (i << 8) | 0x00);
110 nv_mask(dev, 0x61c448 + or, 0x00000002, 0x00000002); 111 nv_mask(device, 0x61c448 + or, 0x00000002, 0x00000002);
111 } 112 }
112 } 113 }
113} 114}
@@ -219,9 +220,9 @@ void
219nouveau_hdmi_mode_set(struct drm_encoder *encoder, 220nouveau_hdmi_mode_set(struct drm_encoder *encoder,
220 struct drm_display_mode *mode) 221 struct drm_display_mode *mode)
221{ 222{
223 struct nouveau_device *device = nouveau_dev(encoder->dev);
222 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 224 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
223 struct nouveau_connector *nv_connector; 225 struct nouveau_connector *nv_connector;
224 struct drm_device *dev = encoder->dev;
225 u32 max_ac_packet, rekey; 226 u32 max_ac_packet, rekey;
226 227
227 nv_connector = nouveau_encoder_connector_get(nv_encoder); 228 nv_connector = nouveau_encoder_connector_get(nv_encoder);
@@ -238,9 +239,9 @@ nouveau_hdmi_mode_set(struct drm_encoder *encoder,
238 hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */ 239 hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
239 hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */ 240 hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
240 241
241 nv_mask(dev, 0x61733c, 0x00100000, 0x00100000); /* RESETF */ 242 nv_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
242 nv_mask(dev, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */ 243 nv_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
243 nv_mask(dev, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */ 244 nv_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
244 245
245 /* value matches nvidia binary driver, and tegra constant */ 246 /* value matches nvidia binary driver, and tegra constant */
246 rekey = 56; 247 rekey = 56;
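
[editor's note] On the ELD path changed above, 0x61c440 + or behaves as an indexed data port: each 32-bit write carries the byte index in bits 15:8 and the ELD byte in bits 7:0, first for the eld[2] * 4 valid bytes and then zero-padded up to offset 0x60, after which bit 1 of 0x61c448 + or is set as what appears to be the ELD-valid flag.
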
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index 6eabc2ea0c7d..617a06ffdb46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -23,9 +23,13 @@
23 */ 23 */
24 24
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include "nouveau_drv.h" 26#include "nouveau_drm.h"
27#include "nouveau_hw.h" 27#include "nouveau_hw.h"
28 28
29#include <subdev/bios/pll.h>
30#include <subdev/clock.h>
31#include <subdev/timer.h>
32
29#define CHIPSET_NFORCE 0x01a0 33#define CHIPSET_NFORCE 0x01a0
30#define CHIPSET_NFORCE2 0x01f0 34#define CHIPSET_NFORCE2 0x01f0
31 35
@@ -82,12 +86,12 @@ NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
82void 86void
83NVSetOwner(struct drm_device *dev, int owner) 87NVSetOwner(struct drm_device *dev, int owner)
84{ 88{
85 struct drm_nouveau_private *dev_priv = dev->dev_private; 89 struct nouveau_drm *drm = nouveau_drm(dev);
86 90
87 if (owner == 1) 91 if (owner == 1)
88 owner *= 3; 92 owner *= 3;
89 93
90 if (dev_priv->chipset == 0x11) { 94 if (nv_device(drm->device)->chipset == 0x11) {
91 /* This might seem stupid, but the blob does it and 95 /* This might seem stupid, but the blob does it and
92 * omitting it often locks the system up. 96 * omitting it often locks the system up.
93 */ 97 */
@@ -98,7 +102,7 @@ NVSetOwner(struct drm_device *dev, int owner)
98 /* CR44 is always changed on CRTC0 */ 102 /* CR44 is always changed on CRTC0 */
99 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner); 103 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
100 104
101 if (dev_priv->chipset == 0x11) { /* set me harder */ 105 if (nv_device(drm->device)->chipset == 0x11) { /* set me harder */
102 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); 106 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
103 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); 107 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
104 } 108 }
@@ -123,270 +127,6 @@ NVBlankScreen(struct drm_device *dev, int head, bool blank)
123} 127}
124 128
125/* 129/*
126 * PLL setting
127 */
128
129static int
130powerctrl_1_shift(int chip_version, int reg)
131{
132 int shift = -4;
133
134 if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
135 return shift;
136
137 switch (reg) {
138 case NV_RAMDAC_VPLL2:
139 shift += 4;
140 case NV_PRAMDAC_VPLL_COEFF:
141 shift += 4;
142 case NV_PRAMDAC_MPLL_COEFF:
143 shift += 4;
144 case NV_PRAMDAC_NVPLL_COEFF:
145 shift += 4;
146 }
147
148 /*
149 * the shift for vpll regs is only used for nv3x chips with a single
150 * stage pll
151 */
152 if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
153 chip_version == 0x36 || chip_version >= 0x40))
154 shift = -4;
155
156 return shift;
157}
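
[editor's note] The case cascade in powerctrl_1_shift() above is deliberate fallthrough arithmetic: starting from -4, each matching case adds 4 for every register listed at or below it, so NV_PRAMDAC_NVPLL_COEFF resolves to shift 0, MPLL to 4, VPLL to 8 and VPLL2 to 12, before the final check clamps the VPLL shifts back to -4 on everything except the nv3x single-stage-PLL chips.
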
158
159static void
160setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
161{
162 struct drm_nouveau_private *dev_priv = dev->dev_private;
163 int chip_version = dev_priv->vbios.chip_version;
164 uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
165 int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
166 uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
167 uint32_t saved_powerctrl_1 = 0;
168 int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);
169
170 if (oldpll == pll)
171 return; /* already set */
172
173 if (shift_powerctrl_1 >= 0) {
174 saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
175 nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
176 (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
177 1 << shift_powerctrl_1);
178 }
179
180 if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
181 /* upclock -- write new post divider first */
182 NVWriteRAMDAC(dev, 0, reg, pv->log2P << 16 | (oldpll & 0xffff));
183 else
184 /* downclock -- write new NM first */
185 NVWriteRAMDAC(dev, 0, reg, (oldpll & 0xffff0000) | pv->NM1);
186
187 if (chip_version < 0x17 && chip_version != 0x11)
188 /* wait a bit on older chips */
189 msleep(64);
190 NVReadRAMDAC(dev, 0, reg);
191
192 /* then write the other half as well */
193 NVWriteRAMDAC(dev, 0, reg, pll);
194
195 if (shift_powerctrl_1 >= 0)
196 nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
197}
198
199static uint32_t
200new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
201{
202 bool head_a = (reg1 == NV_PRAMDAC_VPLL_COEFF);
203
204 if (ss) /* single stage pll mode */
205 ramdac580 |= head_a ? NV_RAMDAC_580_VPLL1_ACTIVE :
206 NV_RAMDAC_580_VPLL2_ACTIVE;
207 else
208 ramdac580 &= head_a ? ~NV_RAMDAC_580_VPLL1_ACTIVE :
209 ~NV_RAMDAC_580_VPLL2_ACTIVE;
210
211 return ramdac580;
212}
213
214static void
215setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
216 struct nouveau_pll_vals *pv)
217{
218 struct drm_nouveau_private *dev_priv = dev->dev_private;
219 int chip_version = dev_priv->vbios.chip_version;
220 bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
221 uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
222 uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
223 uint32_t oldpll2 = !nv3035 ? NVReadRAMDAC(dev, 0, reg2) : 0;
224 uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
225 uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
226 uint32_t oldramdac580 = 0, ramdac580 = 0;
227 bool single_stage = !pv->NM2 || pv->N2 == pv->M2; /* nv41+ only */
228 uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
229 int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);
230
231 /* model specific additions to generic pll1 and pll2 set up above */
232 if (nv3035) {
233 pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
234 (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
235 pll2 = 0;
236 }
237 if (chip_version > 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { /* !nv40 */
238 oldramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
239 ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
240 if (oldramdac580 != ramdac580)
241 oldpll1 = ~0; /* force mismatch */
242 if (single_stage)
243 /* magic value used by nvidia in single stage mode */
244 pll2 |= 0x011f;
245 }
246 if (chip_version > 0x70)
247 /* magic bits set by the blob (but not the bios) on g71-73 */
248 pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;
249
250 if (oldpll1 == pll1 && oldpll2 == pll2)
251 return; /* already set */
252
253 if (shift_powerctrl_1 >= 0) {
254 saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
255 nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
256 (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
257 1 << shift_powerctrl_1);
258 }
259
260 if (chip_version >= 0x40) {
261 int shift_c040 = 14;
262
263 switch (reg1) { /* deliberate fallthrough: each case adds 2 */
264 case NV_PRAMDAC_MPLL_COEFF:
265 shift_c040 += 2;
266 case NV_PRAMDAC_NVPLL_COEFF:
267 shift_c040 += 2;
268 case NV_RAMDAC_VPLL2:
269 shift_c040 += 2;
270 case NV_PRAMDAC_VPLL_COEFF:
271 shift_c040 += 2;
272 }
273
274 savedc040 = nvReadMC(dev, 0xc040);
275 if (shift_c040 != 14)
276 nvWriteMC(dev, 0xc040, savedc040 & ~(3 << shift_c040));
277 }
278
279 if (oldramdac580 != ramdac580)
280 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_580, ramdac580);
281
282 if (!nv3035)
283 NVWriteRAMDAC(dev, 0, reg2, pll2);
284 NVWriteRAMDAC(dev, 0, reg1, pll1);
285
286 if (shift_powerctrl_1 >= 0)
287 nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
288 if (chip_version >= 0x40)
289 nvWriteMC(dev, 0xc040, savedc040);
290}
291
292static void
293setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
294 struct nouveau_pll_vals *pv)
295{
296 /* When setting PLLs, there is a merry game of disabling and enabling
297 * various bits of hardware during the process. This function is a
298 * synthesis of six nv4x traces, with nearly every card doing a subtly
299 * different thing. With luck all the necessary bits for each card are
300 * combined herein. Without luck it deviates from each card's formula
301 * so as to not work on any :)
302 */
303
304 uint32_t Preg = NMNMreg - 4;
305 bool mpll = Preg == 0x4020;
306 uint32_t oldPval = nvReadMC(dev, Preg);
307 uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
308 uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
309 0xc << 28 | pv->log2P << 16;
310 uint32_t saved4600 = 0;
311 /* some cards have different maskc040s */
312 uint32_t maskc040 = ~(3 << 14), savedc040;
313 bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
314
315 if (nvReadMC(dev, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
316 return;
317
318 if (Preg == 0x4000)
319 maskc040 = ~0x333;
320 if (Preg == 0x4058)
321 maskc040 = ~(0xc << 24);
322
323 if (mpll) {
324 struct pll_lims pll_lim;
325 uint8_t Pval2;
326
327 if (get_pll_limits(dev, Preg, &pll_lim))
328 return;
329
330 Pval2 = pv->log2P + pll_lim.log2p_bias;
331 if (Pval2 > pll_lim.max_log2p)
332 Pval2 = pll_lim.max_log2p;
333 Pval |= 1 << 28 | Pval2 << 20;
334
335 saved4600 = nvReadMC(dev, 0x4600);
336 nvWriteMC(dev, 0x4600, saved4600 | 8 << 28);
337 }
338 if (single_stage)
339 Pval |= mpll ? 1 << 12 : 1 << 8;
340
341 nvWriteMC(dev, Preg, oldPval | 1 << 28);
342 nvWriteMC(dev, Preg, Pval & ~(4 << 28));
343 if (mpll) {
344 Pval |= 8 << 20;
345 nvWriteMC(dev, 0x4020, Pval & ~(0xc << 28));
346 nvWriteMC(dev, 0x4038, Pval & ~(0xc << 28));
347 }
348
349 savedc040 = nvReadMC(dev, 0xc040);
350 nvWriteMC(dev, 0xc040, savedc040 & maskc040);
351
352 nvWriteMC(dev, NMNMreg, NMNM);
353 if (NMNMreg == 0x4024)
354 nvWriteMC(dev, 0x403c, NMNM);
355
356 nvWriteMC(dev, Preg, Pval);
357 if (mpll) {
358 Pval &= ~(8 << 20);
359 nvWriteMC(dev, 0x4020, Pval);
360 nvWriteMC(dev, 0x4038, Pval);
361 nvWriteMC(dev, 0x4600, saved4600);
362 }
363
364 nvWriteMC(dev, 0xc040, savedc040);
365
366 if (mpll) {
367 nvWriteMC(dev, 0x4020, Pval & ~(1 << 28));
368 nvWriteMC(dev, 0x4038, Pval & ~(1 << 28));
369 }
370}
371
372void
373nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
374 struct nouveau_pll_vals *pv)
375{
376 struct drm_nouveau_private *dev_priv = dev->dev_private;
377 int cv = dev_priv->vbios.chip_version;
378
379 if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
380 cv >= 0x40) {
381 if (reg1 > 0x405c)
382 setPLL_double_highregs(dev, reg1, pv);
383 else
384 setPLL_double_lowregs(dev, reg1, pv);
385 } else
386 setPLL_single(dev, reg1, pv);
387}
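
[editor's note] A detail of the removed single-stage path worth keeping in mind: setPLL_single() ordered its two half-register writes by clock direction, writing the new post-divider first when upclocking and the new N/M pair first when downclocking, so the intermediate state never overshoots the target frequency; the double-register paths play the same game with the PLL power controls and the 0xc040 clock gates around the writes.
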
388
389/*
390 * PLL getting 130 * PLL getting
391 */ 131 */
392 132
@@ -394,7 +134,7 @@ static void
394nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1, 134nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
395 uint32_t pll2, struct nouveau_pll_vals *pllvals) 135 uint32_t pll2, struct nouveau_pll_vals *pllvals)
396{ 136{
397 struct drm_nouveau_private *dev_priv = dev->dev_private; 137 struct nouveau_drm *drm = nouveau_drm(dev);
398 138
399 /* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */ 139 /* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */
400 140
@@ -411,7 +151,7 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
411 pllvals->NM1 = pll1 & 0xffff; 151 pllvals->NM1 = pll1 & 0xffff;
412 if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2) 152 if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
413 pllvals->NM2 = pll2 & 0xffff; 153 pllvals->NM2 = pll2 & 0xffff;
414 else if (dev_priv->chipset == 0x30 || dev_priv->chipset == 0x35) { 154 else if (nv_device(drm->device)->chipset == 0x30 || nv_device(drm->device)->chipset == 0x35) {
415 pllvals->M1 &= 0xf; /* only 4 bits */ 155 pllvals->M1 &= 0xf; /* only 4 bits */
416 if (pll1 & NV30_RAMDAC_ENABLE_VCO2) { 156 if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
417 pllvals->M2 = (pll1 >> 4) & 0x7; 157 pllvals->M2 = (pll1 >> 4) & 0x7;
@@ -423,28 +163,30 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
423} 163}
424 164
425int 165int
426nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype, 166nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
427 struct nouveau_pll_vals *pllvals) 167 struct nouveau_pll_vals *pllvals)
428{ 168{
429 struct drm_nouveau_private *dev_priv = dev->dev_private; 169 struct nouveau_drm *drm = nouveau_drm(dev);
430 uint32_t reg1 = get_pll_register(dev, plltype), pll1, pll2 = 0; 170 struct nouveau_device *device = nv_device(drm->device);
431 struct pll_lims pll_lim; 171 struct nouveau_bios *bios = nouveau_bios(device);
172 uint32_t reg1, pll1, pll2 = 0;
173 struct nvbios_pll pll_lim;
432 int ret; 174 int ret;
433 175
434 if (reg1 == 0) 176 ret = nvbios_pll_parse(bios, plltype, &pll_lim);
177 if (ret || !(reg1 = pll_lim.reg))
435 return -ENOENT; 178 return -ENOENT;
436 179
437 pll1 = nvReadMC(dev, reg1); 180 pll1 = nv_rd32(device, reg1);
438
439 if (reg1 <= 0x405c) 181 if (reg1 <= 0x405c)
440 pll2 = nvReadMC(dev, reg1 + 4); 182 pll2 = nv_rd32(device, reg1 + 4);
441 else if (nv_two_reg_pll(dev)) { 183 else if (nv_two_reg_pll(dev)) {
442 uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70); 184 uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
443 185
444 pll2 = nvReadMC(dev, reg2); 186 pll2 = nv_rd32(device, reg2);
445 } 187 }
446 188
447 if (dev_priv->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { 189 if (nv_device(drm->device)->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
448 uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580); 190 uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
449 191
450 /* check whether vpll has been forced into single stage mode */ 192 /* check whether vpll has been forced into single stage mode */
@@ -457,13 +199,7 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
457 } 199 }
458 200
459 nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals); 201 nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);
460
461 ret = get_pll_limits(dev, plltype, &pll_lim);
462 if (ret)
463 return ret;
464
465 pllvals->refclk = pll_lim.refclk; 202 pllvals->refclk = pll_lim.refclk;
466
467 return 0; 203 return 0;
468} 204}
469 205
@@ -478,7 +214,7 @@ nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
478} 214}
479 215
480int 216int
481nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype) 217nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
482{ 218{
483 struct nouveau_pll_vals pllvals; 219 struct nouveau_pll_vals pllvals;
484 int ret; 220 int ret;
@@ -517,26 +253,30 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
517 * when such a condition is detected. only seen on nv11 to date 253 * when such a condition is detected. only seen on nv11 to date
518 */ 254 */
519 255
520 struct pll_lims pll_lim; 256 struct nouveau_drm *drm = nouveau_drm(dev);
257 struct nouveau_device *device = nv_device(drm->device);
258 struct nouveau_clock *clk = nouveau_clock(device);
259 struct nouveau_bios *bios = nouveau_bios(device);
260 struct nvbios_pll pll_lim;
521 struct nouveau_pll_vals pv; 261 struct nouveau_pll_vals pv;
522 enum pll_types pll = head ? PLL_VPLL1 : PLL_VPLL0; 262 enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0;
523 263
524 if (get_pll_limits(dev, pll, &pll_lim)) 264 if (nvbios_pll_parse(bios, pll, &pll_lim))
525 return; 265 return;
526 nouveau_hw_get_pllvals(dev, pll, &pv); 266 nouveau_hw_get_pllvals(dev, pll, &pv);
527 267
528 if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && 268 if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
529 pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && 269 pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
530 pv.log2P <= pll_lim.max_log2p) 270 pv.log2P <= pll_lim.max_p)
531 return; 271 return;
532 272
533 NV_WARN(dev, "VPLL %d outwith limits, attempting to fix\n", head + 1); 273 NV_WARN(drm, "VPLL %d outwith limits, attempting to fix\n", head + 1);
534 274
535 /* set lowest clock within static limits */ 275 /* set lowest clock within static limits */
536 pv.M1 = pll_lim.vco1.max_m; 276 pv.M1 = pll_lim.vco1.max_m;
537 pv.N1 = pll_lim.vco1.min_n; 277 pv.N1 = pll_lim.vco1.min_n;
538 pv.log2P = pll_lim.max_usable_log2p; 278 pv.log2P = pll_lim.max_p_usable;
539 nouveau_hw_setpll(dev, pll_lim.reg, &pv); 279 clk->pll_prog(clk, pll_lim.reg, &pv);
540} 280}
541 281
542/* 282/*
@@ -547,17 +287,16 @@ static void nouveau_vga_font_io(struct drm_device *dev,
547 void __iomem *iovram, 287 void __iomem *iovram,
548 bool save, unsigned plane) 288 bool save, unsigned plane)
549{ 289{
550 struct drm_nouveau_private *dev_priv = dev->dev_private;
551 unsigned i; 290 unsigned i;
552 291
553 NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane); 292 NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
554 NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane); 293 NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
555 for (i = 0; i < 16384; i++) { 294 for (i = 0; i < 16384; i++) {
556 if (save) { 295 if (save) {
557 dev_priv->saved_vga_font[plane][i] = 296 nv04_display(dev)->saved_vga_font[plane][i] =
558 ioread32_native(iovram + i * 4); 297 ioread32_native(iovram + i * 4);
559 } else { 298 } else {
560 iowrite32_native(dev_priv->saved_vga_font[plane][i], 299 iowrite32_native(nv04_display(dev)->saved_vga_font[plane][i],
561 iovram + i * 4); 300 iovram + i * 4);
562 } 301 }
563 } 302 }
@@ -566,6 +305,7 @@ static void nouveau_vga_font_io(struct drm_device *dev,
566void 305void
567nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save) 306nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
568{ 307{
308 struct nouveau_drm *drm = nouveau_drm(dev);
569 uint8_t misc, gr4, gr5, gr6, seq2, seq4; 309 uint8_t misc, gr4, gr5, gr6, seq2, seq4;
570 bool graphicsmode; 310 bool graphicsmode;
571 unsigned plane; 311 unsigned plane;
@@ -581,12 +321,12 @@ nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
581 if (graphicsmode) /* graphics mode => framebuffer => no need to save */ 321 if (graphicsmode) /* graphics mode => framebuffer => no need to save */
582 return; 322 return;
583 323
584 NV_INFO(dev, "%sing VGA fonts\n", save ? "Sav" : "Restor"); 324 NV_INFO(drm, "%sing VGA fonts\n", save ? "Sav" : "Restor");
585 325
586 /* map first 64KiB of VRAM, holds VGA fonts etc */ 326 /* map first 64KiB of VRAM, holds VGA fonts etc */
587 iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536); 327 iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
588 if (!iovram) { 328 if (!iovram) {
589 NV_ERROR(dev, "Failed to map VRAM, " 329 NV_ERROR(drm, "Failed to map VRAM, "
590 "cannot save/restore VGA fonts.\n"); 330 "cannot save/restore VGA fonts.\n");
591 return; 331 return;
592 } 332 }
@@ -649,25 +389,25 @@ static void
649nv_save_state_ramdac(struct drm_device *dev, int head, 389nv_save_state_ramdac(struct drm_device *dev, int head,
650 struct nv04_mode_state *state) 390 struct nv04_mode_state *state)
651{ 391{
652 struct drm_nouveau_private *dev_priv = dev->dev_private; 392 struct nouveau_drm *drm = nouveau_drm(dev);
653 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 393 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
654 int i; 394 int i;
655 395
656 if (dev_priv->card_type >= NV_10) 396 if (nv_device(drm->device)->card_type >= NV_10)
657 regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); 397 regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
658 398
659 nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals); 399 nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
660 state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); 400 state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
661 if (nv_two_heads(dev)) 401 if (nv_two_heads(dev))
662 state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); 402 state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
663 if (dev_priv->chipset == 0x11) 403 if (nv_device(drm->device)->chipset == 0x11)
664 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); 404 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
665 405
666 regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); 406 regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
667 407
668 if (nv_gf4_disp_arch(dev)) 408 if (nv_gf4_disp_arch(dev))
669 regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); 409 regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
670 if (dev_priv->chipset >= 0x30) 410 if (nv_device(drm->device)->chipset >= 0x30)
671 regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); 411 regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
672 412
673 regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); 413 regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
@@ -709,7 +449,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
709 if (nv_gf4_disp_arch(dev)) 449 if (nv_gf4_disp_arch(dev))
710 regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); 450 regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
711 451
712 if (dev_priv->card_type == NV_40) { 452 if (nv_device(drm->device)->card_type == NV_40) {
713 regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); 453 regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
714 regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); 454 regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
715 regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); 455 regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
@@ -724,26 +464,27 @@ static void
724nv_load_state_ramdac(struct drm_device *dev, int head, 464nv_load_state_ramdac(struct drm_device *dev, int head,
725 struct nv04_mode_state *state) 465 struct nv04_mode_state *state)
726{ 466{
727 struct drm_nouveau_private *dev_priv = dev->dev_private; 467 struct nouveau_drm *drm = nouveau_drm(dev);
468 struct nouveau_clock *clk = nouveau_clock(drm->device);
728 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 469 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
729 uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; 470 uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
730 int i; 471 int i;
731 472
732 if (dev_priv->card_type >= NV_10) 473 if (nv_device(drm->device)->card_type >= NV_10)
733 NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); 474 NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
734 475
735 nouveau_hw_setpll(dev, pllreg, &regp->pllvals); 476 clk->pll_prog(clk, pllreg, &regp->pllvals);
736 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); 477 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
737 if (nv_two_heads(dev)) 478 if (nv_two_heads(dev))
738 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk); 479 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
739 if (dev_priv->chipset == 0x11) 480 if (nv_device(drm->device)->chipset == 0x11)
740 NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); 481 NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
741 482
742 NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); 483 NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
743 484
744 if (nv_gf4_disp_arch(dev)) 485 if (nv_gf4_disp_arch(dev))
745 NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); 486 NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
746 if (dev_priv->chipset >= 0x30) 487 if (nv_device(drm->device)->chipset >= 0x30)
747 NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); 488 NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
748 489
749 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); 490 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
@@ -780,7 +521,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
780 if (nv_gf4_disp_arch(dev)) 521 if (nv_gf4_disp_arch(dev))
781 NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); 522 NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
782 523
783 if (dev_priv->card_type == NV_40) { 524 if (nv_device(drm->device)->card_type == NV_40) {
784 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); 525 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
785 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); 526 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
786 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); 527 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
@@ -845,7 +586,7 @@ static void
845nv_save_state_ext(struct drm_device *dev, int head, 586nv_save_state_ext(struct drm_device *dev, int head,
846 struct nv04_mode_state *state) 587 struct nv04_mode_state *state)
847{ 588{
848 struct drm_nouveau_private *dev_priv = dev->dev_private; 589 struct nouveau_drm *drm = nouveau_drm(dev);
849 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 590 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
850 int i; 591 int i;
851 592
@@ -861,10 +602,10 @@ nv_save_state_ext(struct drm_device *dev, int head,
861 rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); 602 rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
862 rd_cio_state(dev, head, regp, NV_CIO_CRE_21); 603 rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
863 604
864 if (dev_priv->card_type >= NV_20) 605 if (nv_device(drm->device)->card_type >= NV_20)
865 rd_cio_state(dev, head, regp, NV_CIO_CRE_47); 606 rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
866 607
867 if (dev_priv->card_type >= NV_30) 608 if (nv_device(drm->device)->card_type >= NV_30)
868 rd_cio_state(dev, head, regp, 0x9f); 609 rd_cio_state(dev, head, regp, 0x9f);
869 610
870 rd_cio_state(dev, head, regp, NV_CIO_CRE_49); 611 rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
@@ -873,14 +614,14 @@ nv_save_state_ext(struct drm_device *dev, int head,
873 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); 614 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
874 rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); 615 rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
875 616
876 if (dev_priv->card_type >= NV_10) { 617 if (nv_device(drm->device)->card_type >= NV_10) {
877 regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); 618 regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
878 regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); 619 regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
879 620
880 if (dev_priv->card_type >= NV_30) 621 if (nv_device(drm->device)->card_type >= NV_30)
881 regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); 622 regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
882 623
883 if (dev_priv->card_type == NV_40) 624 if (nv_device(drm->device)->card_type == NV_40)
884 regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); 625 regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
885 626
886 if (nv_two_heads(dev)) 627 if (nv_two_heads(dev))
@@ -892,7 +633,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
892 633
893 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); 634 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
894 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); 635 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
895 if (dev_priv->card_type >= NV_10) { 636 if (nv_device(drm->device)->card_type >= NV_10) {
896 rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); 637 rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
897 rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); 638 rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
898 rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); 639 rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -920,12 +661,14 @@ static void
920nv_load_state_ext(struct drm_device *dev, int head, 661nv_load_state_ext(struct drm_device *dev, int head,
921 struct nv04_mode_state *state) 662 struct nv04_mode_state *state)
922{ 663{
923 struct drm_nouveau_private *dev_priv = dev->dev_private; 664 struct nouveau_drm *drm = nouveau_drm(dev);
665 struct nouveau_device *device = nv_device(drm->device);
666 struct nouveau_timer *ptimer = nouveau_timer(device);
924 struct nv04_crtc_reg *regp = &state->crtc_reg[head]; 667 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
925 uint32_t reg900; 668 uint32_t reg900;
926 int i; 669 int i;
927 670
928 if (dev_priv->card_type >= NV_10) { 671 if (nv_device(drm->device)->card_type >= NV_10) {
929 if (nv_two_heads(dev)) 672 if (nv_two_heads(dev))
930 /* setting ENGINE_CTRL (EC) *must* come before 673 /* setting ENGINE_CTRL (EC) *must* come before
931 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in 674 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
@@ -933,24 +676,24 @@ nv_load_state_ext(struct drm_device *dev, int head,
933 */ 676 */
934 NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl); 677 NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);
935 678
936 nvWriteVIDEO(dev, NV_PVIDEO_STOP, 1); 679 nv_wr32(device, NV_PVIDEO_STOP, 1);
937 nvWriteVIDEO(dev, NV_PVIDEO_INTR_EN, 0); 680 nv_wr32(device, NV_PVIDEO_INTR_EN, 0);
938 nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(0), 0); 681 nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
939 nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(1), 0); 682 nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
940 nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(0), dev_priv->fb_available_size - 1); 683 nv_wr32(device, NV_PVIDEO_LIMIT(0), 0); //drm->fb_available_size - 1);
941 nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(1), dev_priv->fb_available_size - 1); 684 nv_wr32(device, NV_PVIDEO_LIMIT(1), 0); //drm->fb_available_size - 1);
942 nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(0), dev_priv->fb_available_size - 1); 685 nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), 0); //drm->fb_available_size - 1);
943 nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(1), dev_priv->fb_available_size - 1); 686 nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), 0); //drm->fb_available_size - 1);
944 nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0); 687 nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
945 688
946 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); 689 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
947 NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); 690 NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
948 NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); 691 NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
949 692
950 if (dev_priv->card_type >= NV_30) 693 if (nv_device(drm->device)->card_type >= NV_30)
951 NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); 694 NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
952 695
953 if (dev_priv->card_type == NV_40) { 696 if (nv_device(drm->device)->card_type == NV_40) {
954 NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); 697 NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
955 698
956 reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); 699 reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
@@ -973,23 +716,23 @@ nv_load_state_ext(struct drm_device *dev, int head,
973 wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); 716 wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
974 wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); 717 wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
975 718
976 if (dev_priv->card_type >= NV_20) 719 if (nv_device(drm->device)->card_type >= NV_20)
977 wr_cio_state(dev, head, regp, NV_CIO_CRE_47); 720 wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
978 721
979 if (dev_priv->card_type >= NV_30) 722 if (nv_device(drm->device)->card_type >= NV_30)
980 wr_cio_state(dev, head, regp, 0x9f); 723 wr_cio_state(dev, head, regp, 0x9f);
981 724
982 wr_cio_state(dev, head, regp, NV_CIO_CRE_49); 725 wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
983 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); 726 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
984 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); 727 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
985 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); 728 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
986 if (dev_priv->card_type == NV_40) 729 if (nv_device(drm->device)->card_type == NV_40)
987 nv_fix_nv40_hw_cursor(dev, head); 730 nv_fix_nv40_hw_cursor(dev, head);
988 wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); 731 wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
989 732
990 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); 733 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
991 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); 734 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
992 if (dev_priv->card_type >= NV_10) { 735 if (nv_device(drm->device)->card_type >= NV_10) {
993 wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); 736 wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
994 wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); 737 wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
995 wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); 738 wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -997,11 +740,11 @@ nv_load_state_ext(struct drm_device *dev, int head,
997 } 740 }
998 /* NV11 and NV20 stop at 0x52. */ 741 /* NV11 and NV20 stop at 0x52. */
999 if (nv_gf4_disp_arch(dev)) { 742 if (nv_gf4_disp_arch(dev)) {
1000 if (dev_priv->card_type == NV_10) { 743 if (nv_device(drm->device)->card_type == NV_10) {
1001 /* Not waiting for vertical retrace before modifying 744 /* Not waiting for vertical retrace before modifying
1002 CRE_53/CRE_54 causes lockups. */ 745 CRE_53/CRE_54 causes lockups. */
1003 nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); 746 nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
1004 nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); 747 nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
1005 } 748 }
1006 749
1007 wr_cio_state(dev, head, regp, NV_CIO_CRE_42); 750 wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
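The two nouveau_timer_wait_eq() calls above encode the workaround from the comment: bit 3 (0x8) of NV_PRMCIO_INP0__COLOR is the vertical-retrace status, so the code first waits to enter retrace, then waits to leave it, before touching CRE_53/CRE_54. A minimal sketch of the same poll under those assumptions (the real helper times the loop against PTIMER using the 650000000ns shown):

/* Sketch: poll until (reg & mask) == value or roughly 650ms pass. */
static bool wait_eq_sketch(struct nouveau_device *device,
			   u32 reg, u8 mask, u8 value)
{
	unsigned us = 650000;			/* 650000000ns, as above */
	while (us--) {
		if ((nv_rd08(device, reg) & mask) == value)
			return true;
		udelay(1);			/* <linux/delay.h> */
	}
	return false;	/* timed out; the callers above proceed regardless */
}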
@@ -1024,14 +767,15 @@ static void
1024nv_save_state_palette(struct drm_device *dev, int head, 767nv_save_state_palette(struct drm_device *dev, int head,
1025 struct nv04_mode_state *state) 768 struct nv04_mode_state *state)
1026{ 769{
770 struct nouveau_device *device = nouveau_dev(dev);
1027 int head_offset = head * NV_PRMDIO_SIZE, i; 771 int head_offset = head * NV_PRMDIO_SIZE, i;
1028 772
1029 nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset, 773 nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
1030 NV_PRMDIO_PIXEL_MASK_MASK); 774 NV_PRMDIO_PIXEL_MASK_MASK);
1031 nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0); 775 nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
1032 776
1033 for (i = 0; i < 768; i++) { 777 for (i = 0; i < 768; i++) {
1034 state->crtc_reg[head].DAC[i] = nv_rd08(dev, 778 state->crtc_reg[head].DAC[i] = nv_rd08(device,
1035 NV_PRMDIO_PALETTE_DATA + head_offset); 779 NV_PRMDIO_PALETTE_DATA + head_offset);
1036 } 780 }
1037 781
@@ -1042,14 +786,15 @@ void
1042nouveau_hw_load_state_palette(struct drm_device *dev, int head, 786nouveau_hw_load_state_palette(struct drm_device *dev, int head,
1043 struct nv04_mode_state *state) 787 struct nv04_mode_state *state)
1044{ 788{
789 struct nouveau_device *device = nouveau_dev(dev);
1045 int head_offset = head * NV_PRMDIO_SIZE, i; 790 int head_offset = head * NV_PRMDIO_SIZE, i;
1046 791
1047 nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset, 792 nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
1048 NV_PRMDIO_PIXEL_MASK_MASK); 793 NV_PRMDIO_PIXEL_MASK_MASK);
1049 nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0); 794 nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
1050 795
1051 for (i = 0; i < 768; i++) { 796 for (i = 0; i < 768; i++) {
1052 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA + head_offset, 797 nv_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset,
1053 state->crtc_reg[head].DAC[i]); 798 state->crtc_reg[head].DAC[i]);
1054 } 799 }
1055 800
@@ -1059,9 +804,9 @@ nouveau_hw_load_state_palette(struct drm_device *dev, int head,
1059void nouveau_hw_save_state(struct drm_device *dev, int head, 804void nouveau_hw_save_state(struct drm_device *dev, int head,
1060 struct nv04_mode_state *state) 805 struct nv04_mode_state *state)
1061{ 806{
1062 struct drm_nouveau_private *dev_priv = dev->dev_private; 807 struct nouveau_drm *drm = nouveau_drm(dev);
1063 808
1064 if (dev_priv->chipset == 0x11) 809 if (nv_device(drm->device)->chipset == 0x11)
1065 /* NB: no attempt is made to restore the bad pll later on */ 810 /* NB: no attempt is made to restore the bad pll later on */
1066 nouveau_hw_fix_bad_vpll(dev, head); 811 nouveau_hw_fix_bad_vpll(dev, head);
1067 nv_save_state_ramdac(dev, head, state); 812 nv_save_state_ramdac(dev, head, state);
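Every hunk in this file follows the same mechanical conversion: driver state no longer hangs off dev->dev_private but is reached through the new object model. A hedged sketch of the before/after shape (the has_cursync_* wrappers are invented names; the accessor chain is exactly the one used above):

/* Old world: one big private struct on the drm_device. */
static bool has_cursync_old(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	return dev_priv->card_type >= NV_10;
}

/* New world: nouveau_drm() finds the per-device driver struct,
 * nv_device() resolves the core device object that owns card_type,
 * chipset and the nv_rd32()/nv_wr32()/nv_rd08() accessors. */
static bool has_cursync_new(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	return nv_device(drm->device)->card_type >= NV_10;
}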
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
index 06a66bc84a81..7dff1021fab4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.h
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
@@ -24,7 +24,9 @@
24#define __NOUVEAU_HW_H__ 24#define __NOUVEAU_HW_H__
25 25
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27#include "nouveau_drv.h" 27#include "nv04_display.h"
28
29#include <subdev/bios/pll.h>
28 30
29#define MASK(field) ( \ 31#define MASK(field) ( \
30 (0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field)) 32 (0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field))
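MASK() relies on the register-field convention used throughout these headers: a field is defined as the token pair high:low, so (1 ? field) parses as a conditional expression yielding the high bit and (0 ? field) yields the low bit. Worked expansion with a hypothetical 7:4 field:

#define EXAMPLE_FIELD 7:4	/* hypothetical field covering bits 7..4 */

/* MASK(EXAMPLE_FIELD)
 *   = (0xffffffff >> (31 - ((1 ? 7:4) - (0 ? 7:4)))) << (0 ? 7:4)
 *   = (0xffffffff >> (31 - (7 - 4))) << 4
 *   = 0x0000000f << 4
 *   = 0x000000f0
 */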
@@ -38,12 +40,10 @@ void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value);
38uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index); 40uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index);
39void NVSetOwner(struct drm_device *, int owner); 41void NVSetOwner(struct drm_device *, int owner);
40void NVBlankScreen(struct drm_device *, int head, bool blank); 42void NVBlankScreen(struct drm_device *, int head, bool blank);
41void nouveau_hw_setpll(struct drm_device *, uint32_t reg1,
42 struct nouveau_pll_vals *pv);
43int nouveau_hw_get_pllvals(struct drm_device *, enum pll_types plltype, 43int nouveau_hw_get_pllvals(struct drm_device *, enum nvbios_pll_type plltype,
44 struct nouveau_pll_vals *pllvals); 44 struct nouveau_pll_vals *pllvals);
45int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals); 45int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals);
46int nouveau_hw_get_clock(struct drm_device *, enum pll_types plltype); 46int nouveau_hw_get_clock(struct drm_device *, enum nvbios_pll_type plltype);
47void nouveau_hw_save_vga_fonts(struct drm_device *, bool save); 47void nouveau_hw_save_vga_fonts(struct drm_device *, bool save);
48void nouveau_hw_save_state(struct drm_device *, int head, 48void nouveau_hw_save_state(struct drm_device *, int head,
49 struct nv04_mode_state *state); 49 struct nv04_mode_state *state);
@@ -55,115 +55,51 @@ void nouveau_hw_load_state_palette(struct drm_device *, int head,
55/* nouveau_calc.c */ 55/* nouveau_calc.c */
56extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp, 56extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
57 int *burst, int *lwm); 57 int *burst, int *lwm);
58extern int nouveau_calc_pll_mnp(struct drm_device *, struct pll_lims *pll_lim,
59 int clk, struct nouveau_pll_vals *pv);
60
61static inline uint32_t
62nvReadMC(struct drm_device *dev, uint32_t reg)
63{
64 uint32_t val = nv_rd32(dev, reg);
65 NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
66 return val;
67}
68
69static inline void
70nvWriteMC(struct drm_device *dev, uint32_t reg, uint32_t val)
71{
72 NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
73 nv_wr32(dev, reg, val);
74}
75
76static inline uint32_t
77nvReadVIDEO(struct drm_device *dev, uint32_t reg)
78{
79 uint32_t val = nv_rd32(dev, reg);
80 NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
81 return val;
82}
83
84static inline void
85nvWriteVIDEO(struct drm_device *dev, uint32_t reg, uint32_t val)
86{
87 NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
88 nv_wr32(dev, reg, val);
89}
90
91static inline uint32_t
92nvReadFB(struct drm_device *dev, uint32_t reg)
93{
94 uint32_t val = nv_rd32(dev, reg);
95 NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
96 return val;
97}
98
99static inline void
100nvWriteFB(struct drm_device *dev, uint32_t reg, uint32_t val)
101{
102 NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
103 nv_wr32(dev, reg, val);
104}
105
106static inline uint32_t
107nvReadEXTDEV(struct drm_device *dev, uint32_t reg)
108{
109 uint32_t val = nv_rd32(dev, reg);
110 NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
111 return val;
112}
113
114static inline void
115nvWriteEXTDEV(struct drm_device *dev, uint32_t reg, uint32_t val)
116{
117 NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
118 nv_wr32(dev, reg, val);
119}
120 58
121static inline uint32_t NVReadCRTC(struct drm_device *dev, 59static inline uint32_t NVReadCRTC(struct drm_device *dev,
122 int head, uint32_t reg) 60 int head, uint32_t reg)
123{ 61{
62 struct nouveau_device *device = nouveau_dev(dev);
124 uint32_t val; 63 uint32_t val;
125 if (head) 64 if (head)
126 reg += NV_PCRTC0_SIZE; 65 reg += NV_PCRTC0_SIZE;
127 val = nv_rd32(dev, reg); 66 val = nv_rd32(device, reg);
128 NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
129 return val; 67 return val;
130} 68}
131 69
132static inline void NVWriteCRTC(struct drm_device *dev, 70static inline void NVWriteCRTC(struct drm_device *dev,
133 int head, uint32_t reg, uint32_t val) 71 int head, uint32_t reg, uint32_t val)
134{ 72{
73 struct nouveau_device *device = nouveau_dev(dev);
135 if (head) 74 if (head)
136 reg += NV_PCRTC0_SIZE; 75 reg += NV_PCRTC0_SIZE;
137 NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val); 76 nv_wr32(device, reg, val);
138 nv_wr32(dev, reg, val);
139} 77}
140 78
141static inline uint32_t NVReadRAMDAC(struct drm_device *dev, 79static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
142 int head, uint32_t reg) 80 int head, uint32_t reg)
143{ 81{
82 struct nouveau_device *device = nouveau_dev(dev);
144 uint32_t val; 83 uint32_t val;
145 if (head) 84 if (head)
146 reg += NV_PRAMDAC0_SIZE; 85 reg += NV_PRAMDAC0_SIZE;
147 val = nv_rd32(dev, reg); 86 val = nv_rd32(device, reg);
148 NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
149 head, reg, val);
150 return val; 87 return val;
151} 88}
152 89
153static inline void NVWriteRAMDAC(struct drm_device *dev, 90static inline void NVWriteRAMDAC(struct drm_device *dev,
154 int head, uint32_t reg, uint32_t val) 91 int head, uint32_t reg, uint32_t val)
155{ 92{
93 struct nouveau_device *device = nouveau_dev(dev);
156 if (head) 94 if (head)
157 reg += NV_PRAMDAC0_SIZE; 95 reg += NV_PRAMDAC0_SIZE;
158 NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n", 96 nv_wr32(device, reg, val);
159 head, reg, val);
160 nv_wr32(dev, reg, val);
161} 97}
162 98
163static inline uint8_t nv_read_tmds(struct drm_device *dev, 99static inline uint8_t nv_read_tmds(struct drm_device *dev,
164 int or, int dl, uint8_t address) 100 int or, int dl, uint8_t address)
165{ 101{
166 int ramdac = (or & OUTPUT_C) >> 2; 102 int ramdac = (or & DCB_OUTPUT_C) >> 2;
167 103
168 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, 104 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8,
169 NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address); 105 NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address);
@@ -174,7 +110,7 @@ static inline void nv_write_tmds(struct drm_device *dev,
174 int or, int dl, uint8_t address, 110 int or, int dl, uint8_t address,
175 uint8_t data) 111 uint8_t data)
176{ 112{
177 int ramdac = (or & OUTPUT_C) >> 2; 113 int ramdac = (or & DCB_OUTPUT_C) >> 2;
178 114
179 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data); 115 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data);
180 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address); 116 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address);
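nv_read_tmds()/nv_write_tmds() are the usual index/data indirection: the TMDS register offset goes through FP_TMDS_CONTROL (with WRITE_DISABLE set when reading back) and the payload through FP_TMDS_DATA, 8 bytes apart per link. A hedged usage sketch (register 0x04 is an illustrative address, not a documented one):

/* Sketch: read-modify-write one TMDS register on link 'dl' of output 'or'. */
static void tmds_rmw_sketch(struct drm_device *dev, int or, int dl)
{
	uint8_t val = nv_read_tmds(dev, or, dl, 0x04);
	nv_write_tmds(dev, or, dl, 0x04, val | 0x01);
}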
@@ -183,20 +119,18 @@ static inline void nv_write_tmds(struct drm_device *dev,
183static inline void NVWriteVgaCrtc(struct drm_device *dev, 119static inline void NVWriteVgaCrtc(struct drm_device *dev,
184 int head, uint8_t index, uint8_t value) 120 int head, uint8_t index, uint8_t value)
185{ 121{
186 NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n", 122 struct nouveau_device *device = nouveau_dev(dev);
187 head, index, value); 123 nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
188 nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); 124 nv_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
189 nv_wr08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
190} 125}
191 126
192static inline uint8_t NVReadVgaCrtc(struct drm_device *dev, 127static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
193 int head, uint8_t index) 128 int head, uint8_t index)
194{ 129{
130 struct nouveau_device *device = nouveau_dev(dev);
195 uint8_t val; 131 uint8_t val;
196 nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); 132 nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
197 val = nv_rd08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); 133 val = nv_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
198 NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
199 head, index, val);
200 return val; 134 return val;
201} 135}
202 136
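These two helpers are the classic VGA CRTC index/data pair: the index byte lands in CRX, the payload in CR, and head 1 is reached by offsetting the whole PRMCIO block. Callers use them as a two-step sequence, for example the head-tie test that nv_heads_tied() performs further down:

/* Sketch: read extended CRTC register 0x44 and test the tie bit,
 * exactly as nv_heads_tied() does below. */
bool tied = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;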
@@ -230,75 +164,74 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_
230static inline uint8_t NVReadPRMVIO(struct drm_device *dev, 164static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
231 int head, uint32_t reg) 165 int head, uint32_t reg)
232{ 166{
233 struct drm_nouveau_private *dev_priv = dev->dev_private; 167 struct nouveau_device *device = nouveau_dev(dev);
168 struct nouveau_drm *drm = nouveau_drm(dev);
234 uint8_t val; 169 uint8_t val;
235 170
236 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call 171 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
237 * NVSetOwner for the relevant head to be programmed */ 172 * NVSetOwner for the relevant head to be programmed */
238 if (head && dev_priv->card_type == NV_40) 173 if (head && nv_device(drm->device)->card_type == NV_40)
239 reg += NV_PRMVIO_SIZE; 174 reg += NV_PRMVIO_SIZE;
240 175
241 val = nv_rd08(dev, reg); 176 val = nv_rd08(device, reg);
242 NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n", head, reg, val);
243 return val; 177 return val;
244} 178}
245 179
246static inline void NVWritePRMVIO(struct drm_device *dev, 180static inline void NVWritePRMVIO(struct drm_device *dev,
247 int head, uint32_t reg, uint8_t value) 181 int head, uint32_t reg, uint8_t value)
248{ 182{
249 struct drm_nouveau_private *dev_priv = dev->dev_private; 183 struct nouveau_device *device = nouveau_dev(dev);
184 struct nouveau_drm *drm = nouveau_drm(dev);
250 185
251 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call 186 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
252 * NVSetOwner for the relevant head to be programmed */ 187 * NVSetOwner for the relevant head to be programmed */
253 if (head && dev_priv->card_type == NV_40) 188 if (head && nv_device(drm->device)->card_type == NV_40)
254 reg += NV_PRMVIO_SIZE; 189 reg += NV_PRMVIO_SIZE;
255 190
256 NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n", 191 nv_wr08(device, reg, value);
257 head, reg, value);
258 nv_wr08(dev, reg, value);
259} 192}
260 193
261static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) 194static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
262{ 195{
263 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 196 struct nouveau_device *device = nouveau_dev(dev);
264 nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20); 197 nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
198 nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
265} 199}
266 200
267static inline bool NVGetEnablePalette(struct drm_device *dev, int head) 201static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
268{ 202{
269 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 203 struct nouveau_device *device = nouveau_dev(dev);
270 return !(nv_rd08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); 204 nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
205 return !(nv_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
271} 206}
272 207
273static inline void NVWriteVgaAttr(struct drm_device *dev, 208static inline void NVWriteVgaAttr(struct drm_device *dev,
274 int head, uint8_t index, uint8_t value) 209 int head, uint8_t index, uint8_t value)
275{ 210{
211 struct nouveau_device *device = nouveau_dev(dev);
276 if (NVGetEnablePalette(dev, head)) 212 if (NVGetEnablePalette(dev, head))
277 index &= ~0x20; 213 index &= ~0x20;
278 else 214 else
279 index |= 0x20; 215 index |= 0x20;
280 216
281 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 217 nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
282 NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n", 218 nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
283 head, index, value); 219 nv_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
284 nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
285 nv_wr08(dev, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
286} 220}
287 221
288static inline uint8_t NVReadVgaAttr(struct drm_device *dev, 222static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
289 int head, uint8_t index) 223 int head, uint8_t index)
290{ 224{
225 struct nouveau_device *device = nouveau_dev(dev);
291 uint8_t val; 226 uint8_t val;
292 if (NVGetEnablePalette(dev, head)) 227 if (NVGetEnablePalette(dev, head))
293 index &= ~0x20; 228 index &= ~0x20;
294 else 229 else
295 index |= 0x20; 230 index |= 0x20;
296 231
297 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 232 nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
298 nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index); 233 nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
299 val = nv_rd08(dev, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE); 234 val = nv_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
300 NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
301 head, index, val);
302 return val; 235 return val;
303} 236}
304 237
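Two details in the attribute helpers are easy to miss: the throwaway nv_rd08() of INP0__COLOR resets the attribute controller's shared index/data flip-flop, so the following ARX write is guaranteed to be taken as an index, and bit 0x20 of that index doubles as the palette-access switch, which is why it is folded in from NVGetEnablePalette(). A hedged usage sketch (0x10 is the standard VGA attribute mode-control index, used here only for illustration):

/* Sketch: round-trip one attribute register on head 0; the
 * flip-flop reset and the 0x20 bit are handled inside the helpers. */
uint8_t mode = NVReadVgaAttr(dev, 0, 0x10);
NVWriteVgaAttr(dev, 0, 0x10, mode);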
@@ -325,10 +258,11 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
325static inline bool 258static inline bool
326nv_heads_tied(struct drm_device *dev) 259nv_heads_tied(struct drm_device *dev)
327{ 260{
328 struct drm_nouveau_private *dev_priv = dev->dev_private; 261 struct nouveau_device *device = nouveau_dev(dev);
262 struct nouveau_drm *drm = nouveau_drm(dev);
329 263
330 if (dev_priv->chipset == 0x11) 264 if (nv_device(drm->device)->chipset == 0x11)
331 return !!(nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28)); 265 return !!(nv_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28));
332 266
333 return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4; 267 return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
334} 268}
@@ -377,13 +311,13 @@ nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock)
377static inline bool 311static inline bool
378NVLockVgaCrtcs(struct drm_device *dev, bool lock) 312NVLockVgaCrtcs(struct drm_device *dev, bool lock)
379{ 313{
380 struct drm_nouveau_private *dev_priv = dev->dev_private; 314 struct nouveau_drm *drm = nouveau_drm(dev);
381 bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX); 315 bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
382 316
383 NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX, 317 NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
384 lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE); 318 lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
385 /* NV11 has independently lockable extended crtcs, except when tied */ 319 /* NV11 has independently lockable extended crtcs, except when tied */
386 if (dev_priv->chipset == 0x11 && !nv_heads_tied(dev)) 320 if (nv_device(drm->device)->chipset == 0x11 && !nv_heads_tied(dev))
387 NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX, 321 NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
388 lock ? NV_CIO_SR_LOCK_VALUE : 322 lock ? NV_CIO_SR_LOCK_VALUE :
389 NV_CIO_SR_UNLOCK_RW_VALUE); 323 NV_CIO_SR_UNLOCK_RW_VALUE);
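NVLockVgaCrtcs() appears to return the previous lock state (the waslocked read above), which supports the usual save/restore idiom around code that programs extended CRTC registers. A sketch of that calling convention, not a quote of any particular caller:

/* Sketch: temporarily unlock the extended CRTC registers. */
bool waslocked = NVLockVgaCrtcs(dev, false);
/* ... program NV_CIO_CRE_* registers ... */
if (waslocked)
	NVLockVgaCrtcs(dev, true);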
@@ -398,9 +332,9 @@ NVLockVgaCrtcs(struct drm_device *dev, bool lock)
398 332
399static inline int nv_cursor_width(struct drm_device *dev) 333static inline int nv_cursor_width(struct drm_device *dev)
400{ 334{
401 struct drm_nouveau_private *dev_priv = dev->dev_private; 335 struct nouveau_drm *drm = nouveau_drm(dev);
402 336
403 return dev_priv->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE; 337 return nv_device(drm->device)->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
404} 338}
405 339
406static inline void 340static inline void
@@ -418,11 +352,11 @@ nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
418static inline void 352static inline void
419nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset) 353nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
420{ 354{
421 struct drm_nouveau_private *dev_priv = dev->dev_private; 355 struct nouveau_drm *drm = nouveau_drm(dev);
422 356
423 NVWriteCRTC(dev, head, NV_PCRTC_START, offset); 357 NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
424 358
425 if (dev_priv->card_type == NV_04) { 359 if (nv_device(drm->device)->card_type == NV_04) {
426 /* 360 /*
427 * Hilarious, the 24th bit doesn't want to stick to 361 * Hilarious, the 24th bit doesn't want to stick to
428 * PCRTC_START... 362 * PCRTC_START...
@@ -437,9 +371,9 @@ nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
437static inline void 371static inline void
438nv_show_cursor(struct drm_device *dev, int head, bool show) 372nv_show_cursor(struct drm_device *dev, int head, bool show)
439{ 373{
440 struct drm_nouveau_private *dev_priv = dev->dev_private; 374 struct nouveau_drm *drm = nouveau_drm(dev);
441 uint8_t *curctl1 = 375 uint8_t *curctl1 =
442 &dev_priv->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX]; 376 &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX];
443 377
444 if (show) 378 if (show)
445 *curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); 379 *curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
@@ -447,14 +381,14 @@ nv_show_cursor(struct drm_device *dev, int head, bool show)
447 *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); 381 *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
448 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1); 382 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
449 383
450 if (dev_priv->card_type == NV_40) 384 if (nv_device(drm->device)->card_type == NV_40)
451 nv_fix_nv40_hw_cursor(dev, head); 385 nv_fix_nv40_hw_cursor(dev, head);
452} 386}
453 387
454static inline uint32_t 388static inline uint32_t
455nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp) 389nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
456{ 390{
457 struct drm_nouveau_private *dev_priv = dev->dev_private; 391 struct nouveau_drm *drm = nouveau_drm(dev);
458 int mask; 392 int mask;
459 393
460 if (bpp == 15) 394 if (bpp == 15)
@@ -463,7 +397,7 @@ nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
463 bpp = 8; 397 bpp = 8;
464 398
465 /* Alignment requirements taken from the Haiku driver */ 399 /* Alignment requirements taken from the Haiku driver */
466 if (dev_priv->card_type == NV_04) 400 if (nv_device(drm->device)->card_type == NV_04)
467 mask = 128 / bpp - 1; 401 mask = 128 / bpp - 1;
468 else 402 else
469 mask = 512 / bpp - 1; 403 mask = 512 / bpp - 1;
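The return statement falls outside the hunk, but the visible arithmetic pins the behaviour down: mask is one less than a power-of-two pixel count, 15bpp is first widened to 16, and (assuming the usual (width + mask) & ~mask rounding) pitches align to 512/bpp pixels on NV10 and later, 128/bpp on NV04. Worked example:

/* Worked example (rounding formula assumed, mask values from above). */
uint32_t width = 1366;
int bpp = 32;
int mask = 512 / bpp - 1;			/* 15 on NV10 and later */
uint32_t aligned = (width + mask) & ~mask;	/* 1376 pixels */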
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
deleted file mode 100644
index baf2fa25d077..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ /dev/null
@@ -1,394 +0,0 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/module.h>
26
27#include <drm/drmP.h>
28#include "nouveau_drv.h"
29#include "nouveau_i2c.h"
30#include "nouveau_hw.h"
31
32static void
33i2c_drive_scl(void *data, int state)
34{
35 struct nouveau_i2c_chan *port = data;
36 if (port->type == 0) {
37 u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
38 if (state) val |= 0x20;
39 else val &= 0xdf;
40 NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
41 } else
42 if (port->type == 4) {
43 nv_mask(port->dev, port->drive, 0x2f, state ? 0x21 : 0x01);
44 } else
45 if (port->type == 5) {
46 if (state) port->state |= 0x01;
47 else port->state &= 0xfe;
48 nv_wr32(port->dev, port->drive, 4 | port->state);
49 }
50}
51
52static void
53i2c_drive_sda(void *data, int state)
54{
55 struct nouveau_i2c_chan *port = data;
56 if (port->type == 0) {
57 u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
58 if (state) val |= 0x10;
59 else val &= 0xef;
60 NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
61 } else
62 if (port->type == 4) {
63 nv_mask(port->dev, port->drive, 0x1f, state ? 0x11 : 0x01);
64 } else
65 if (port->type == 5) {
66 if (state) port->state |= 0x02;
67 else port->state &= 0xfd;
68 nv_wr32(port->dev, port->drive, 4 | port->state);
69 }
70}
71
72static int
73i2c_sense_scl(void *data)
74{
75 struct nouveau_i2c_chan *port = data;
76 struct drm_nouveau_private *dev_priv = port->dev->dev_private;
77 if (port->type == 0) {
78 return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x04);
79 } else
80 if (port->type == 4) {
81 return !!(nv_rd32(port->dev, port->sense) & 0x00040000);
82 } else
83 if (port->type == 5) {
84 if (dev_priv->card_type < NV_D0)
85 return !!(nv_rd32(port->dev, port->sense) & 0x01);
86 else
87 return !!(nv_rd32(port->dev, port->sense) & 0x10);
88 }
89 return 0;
90}
91
92static int
93i2c_sense_sda(void *data)
94{
95 struct nouveau_i2c_chan *port = data;
96 struct drm_nouveau_private *dev_priv = port->dev->dev_private;
97 if (port->type == 0) {
98 return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x08);
99 } else
100 if (port->type == 4) {
101 return !!(nv_rd32(port->dev, port->sense) & 0x00080000);
102 } else
103 if (port->type == 5) {
104 if (dev_priv->card_type < NV_D0)
105 return !!(nv_rd32(port->dev, port->sense) & 0x02);
106 else
107 return !!(nv_rd32(port->dev, port->sense) & 0x20);
108 }
109 return 0;
110}
111
112static const uint32_t nv50_i2c_port[] = {
113 0x00e138, 0x00e150, 0x00e168, 0x00e180,
114 0x00e254, 0x00e274, 0x00e764, 0x00e780,
115 0x00e79c, 0x00e7b8
116};
117
118static u8 *
119i2c_table(struct drm_device *dev, u8 *version)
120{
121 u8 *dcb = dcb_table(dev), *i2c = NULL;
122 if (dcb) {
123 if (dcb[0] >= 0x15)
124 i2c = ROMPTR(dev, dcb[2]);
125 if (dcb[0] >= 0x30)
126 i2c = ROMPTR(dev, dcb[4]);
127 }
128
129 /* early revisions had no version number, use dcb version */
130 if (i2c) {
131 *version = dcb[0];
132 if (*version >= 0x30)
133 *version = i2c[0];
134 }
135
136 return i2c;
137}
138
139int
140nouveau_i2c_init(struct drm_device *dev)
141{
142 struct drm_nouveau_private *dev_priv = dev->dev_private;
143 struct nvbios *bios = &dev_priv->vbios;
144 struct nouveau_i2c_chan *port;
145 u8 version = 0x00, entries, recordlen;
146 u8 *i2c, *entry, legacy[2][4] = {};
147 int ret, i;
148
149 INIT_LIST_HEAD(&dev_priv->i2c_ports);
150
151 i2c = i2c_table(dev, &version);
152 if (!i2c) {
153 u8 *bmp = &bios->data[bios->offset];
154 if (bios->type != NVBIOS_BMP)
155 return -ENODEV;
156
157 legacy[0][0] = NV_CIO_CRE_DDC_WR__INDEX;
158 legacy[0][1] = NV_CIO_CRE_DDC_STATUS__INDEX;
159 legacy[1][0] = NV_CIO_CRE_DDC0_WR__INDEX;
160 legacy[1][1] = NV_CIO_CRE_DDC0_STATUS__INDEX;
161
162 /* BMP (from v4.0) has i2c info in the structure, it's in a
163 * fixed location on earlier VBIOS
164 */
165 if (bmp[5] < 4)
166 i2c = &bios->data[0x48];
167 else
168 i2c = &bmp[0x36];
169
170 if (i2c[4]) legacy[0][0] = i2c[4];
171 if (i2c[5]) legacy[0][1] = i2c[5];
172 if (i2c[6]) legacy[1][0] = i2c[6];
173 if (i2c[7]) legacy[1][1] = i2c[7];
174 }
175
176 if (version >= 0x30) {
177 entry = i2c[1] + i2c;
178 entries = i2c[2];
179 recordlen = i2c[3];
180 } else
181 if (version) {
182 entry = i2c;
183 entries = 16;
184 recordlen = 4;
185 } else {
186 entry = legacy[0];
187 entries = 2;
188 recordlen = 4;
189 }
190
191 for (i = 0; i < entries; i++, entry += recordlen) {
192 port = kzalloc(sizeof(*port), GFP_KERNEL);
193 if (port == NULL) {
194 nouveau_i2c_fini(dev);
195 return -ENOMEM;
196 }
197
198 port->type = entry[3];
199 if (version < 0x30) {
200 port->type &= 0x07;
201 if (port->type == 0x07)
202 port->type = 0xff;
203 }
204
205 if (port->type == 0xff) {
206 kfree(port);
207 continue;
208 }
209
210 switch (port->type) {
211 case 0: /* NV04:NV50 */
212 port->drive = entry[0];
213 port->sense = entry[1];
214 break;
215 case 4: /* NV4E */
216 port->drive = 0x600800 + entry[1];
217 port->sense = port->drive;
218 break;
219 case 5: /* NV50- */
220 port->drive = entry[0] & 0x0f;
221 if (dev_priv->card_type < NV_D0) {
222 if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
223 break;
224 port->drive = nv50_i2c_port[port->drive];
225 port->sense = port->drive;
226 } else {
227 port->drive = 0x00d014 + (port->drive * 0x20);
228 port->sense = port->drive;
229 }
230 break;
231 case 6: /* NV50- DP AUX */
232 port->drive = entry[0] & 0x0f;
233 port->sense = port->drive;
234 port->adapter.algo = &nouveau_dp_i2c_algo;
235 break;
236 default:
237 break;
238 }
239
240 if (!port->adapter.algo && !port->drive) {
241 NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
242 i, port->type, port->drive, port->sense);
243 kfree(port);
244 continue;
245 }
246
247 snprintf(port->adapter.name, sizeof(port->adapter.name),
248 "nouveau-%s-%d", pci_name(dev->pdev), i);
249 port->adapter.owner = THIS_MODULE;
250 port->adapter.dev.parent = &dev->pdev->dev;
251 port->dev = dev;
252 port->index = i;
253 port->dcb = ROM32(entry[0]);
254 i2c_set_adapdata(&port->adapter, i2c);
255
256 if (port->adapter.algo != &nouveau_dp_i2c_algo) {
257 port->adapter.algo_data = &port->bit;
258 port->bit.udelay = 10;
259 port->bit.timeout = usecs_to_jiffies(2200);
260 port->bit.data = port;
261 port->bit.setsda = i2c_drive_sda;
262 port->bit.setscl = i2c_drive_scl;
263 port->bit.getsda = i2c_sense_sda;
264 port->bit.getscl = i2c_sense_scl;
265
266 i2c_drive_scl(port, 0);
267 i2c_drive_sda(port, 1);
268 i2c_drive_scl(port, 1);
269
270 ret = i2c_bit_add_bus(&port->adapter);
271 } else {
272 port->adapter.algo = &nouveau_dp_i2c_algo;
273 ret = i2c_add_adapter(&port->adapter);
274 }
275
276 if (ret) {
277 NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
278 kfree(port);
279 continue;
280 }
281
282 list_add_tail(&port->head, &dev_priv->i2c_ports);
283 }
284
285 return 0;
286}
287
288void
289nouveau_i2c_fini(struct drm_device *dev)
290{
291 struct drm_nouveau_private *dev_priv = dev->dev_private;
292 struct nouveau_i2c_chan *port, *tmp;
293
294 list_for_each_entry_safe(port, tmp, &dev_priv->i2c_ports, head) {
295 i2c_del_adapter(&port->adapter);
296 kfree(port);
297 }
298}
299
300struct nouveau_i2c_chan *
301nouveau_i2c_find(struct drm_device *dev, u8 index)
302{
303 struct drm_nouveau_private *dev_priv = dev->dev_private;
304 struct nouveau_i2c_chan *port;
305
306 if (index == NV_I2C_DEFAULT(0) ||
307 index == NV_I2C_DEFAULT(1)) {
308 u8 version, *i2c = i2c_table(dev, &version);
309 if (i2c && version >= 0x30) {
310 if (index == NV_I2C_DEFAULT(0))
311 index = (i2c[4] & 0x0f);
312 else
313 index = (i2c[4] & 0xf0) >> 4;
314 } else {
315 index = 2;
316 }
317 }
318
319 list_for_each_entry(port, &dev_priv->i2c_ports, head) {
320 if (port->index == index)
321 break;
322 }
323
324 if (&port->head == &dev_priv->i2c_ports)
325 return NULL;
326
327 if (dev_priv->card_type >= NV_50 && (port->dcb & 0x00000100)) {
328 u32 reg = 0x00e500, val;
329 if (port->type == 6) {
330 reg += port->drive * 0x50;
331 val = 0x2002;
332 } else {
333 reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
334 val = 0xe001;
335 }
336
337 /* nfi, but neither auxch or i2c work if it's 1 */
338 nv_mask(dev, reg + 0x0c, 0x00000001, 0x00000000);
339 /* nfi, but switches auxch vs normal i2c */
340 nv_mask(dev, reg + 0x00, 0x0000f003, val);
341 }
342
343 return port;
344}
345
346bool
347nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr)
348{
349 uint8_t buf[] = { 0 };
350 struct i2c_msg msgs[] = {
351 {
352 .addr = addr,
353 .flags = 0,
354 .len = 1,
355 .buf = buf,
356 },
357 {
358 .addr = addr,
359 .flags = I2C_M_RD,
360 .len = 1,
361 .buf = buf,
362 }
363 };
364
365 return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
366}
367
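nouveau_probe_i2c_addr() declares a device present only when both a one-byte write and a one-byte read to the address are ACKed. Typical use is presence detection, for instance a DDC EEPROM at the standard 0x50 address (a sketch; the bus index would come from the DCB):

/* Sketch: does anything answer at the monitor DDC address? */
static bool ddc_present_sketch(struct drm_device *dev, u8 bus)
{
	struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, bus);
	return i2c && nouveau_probe_i2c_addr(i2c, 0x50);
}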
368int
369nouveau_i2c_identify(struct drm_device *dev, const char *what,
370 struct i2c_board_info *info,
371 bool (*match)(struct nouveau_i2c_chan *,
372 struct i2c_board_info *),
373 int index)
374{
375 struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
376 int i;
377
378 if (!i2c) {
379 NV_DEBUG(dev, "No bus when probing %s on %d\n", what, index);
380 return -ENODEV;
381 }
382
383 NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, i2c->index);
384 for (i = 0; info[i].addr; i++) {
385 if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
386 (!match || match(i2c, &info[i]))) {
387 NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
388 return i;
389 }
390 }
391
392 NV_DEBUG(dev, "No devices found.\n");
393 return -ENODEV;
394}
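Port addressing in the deleted code differs per generation: type 0 bit-bangs through VGA CRTC registers, type 4 uses a fixed cell at 0x600800 + entry, and type 5 either indexes nv50_i2c_port[] or, from NV_D0 on, a regular 0x20-byte stride. Worked example of the type-5 mapping for logical port 3:

/* Worked example of the type-5 mapping above. */
u8 idx = 3;
u32 pre_nvd0 = nv50_i2c_port[idx];	/* 0x00e180, from the table */
u32 nvd0     = 0x00d014 + idx * 0x20;	/* 0x00d074 */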
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index aa3a067c707b..08214bcdcb12 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -35,7 +35,7 @@
35 35
36#include <drm/drmP.h> 36#include <drm/drmP.h>
37 37
38#include "nouveau_drv.h" 38#include "nouveau_ioctl.h"
39 39
40/** 40/**
41 * Called whenever a 32-bit process running under a 64-bit kernel 41 * Called whenever a 32-bit process running under a 64-bit kernel
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioctl.h b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
new file mode 100644
index 000000000000..ef2b2906d9e6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
@@ -0,0 +1,6 @@
1#ifndef __NOUVEAU_IOCTL_H__
2#define __NOUVEAU_IOCTL_H__
3
4long nouveau_compat_ioctl(struct file *, unsigned int cmd, unsigned long arg);
5
6#endif
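The lone declaration exists so the compat path can be wired into the driver's file_operations. A hedged sketch of the consumer (field names per the standard struct file_operations; other fields elided):

static const struct file_operations nouveau_fops_sketch = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nouveau_compat_ioctl,	/* 32-bit userspace, 64-bit kernel */
#endif
};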
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 6273b7763cd6..9ca8afdb5549 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -1,146 +1,86 @@
1/* 1/*
[old header, lines 2-31: "Copyright (C) 2006 Ben Skeggs. / All Rights Reserved.", the MIT permission and warranty text, and a separate "Authors: Ben Skeggs <darktama@iinet.net.au>" comment block]
[new header, lines 2-23: the standard MIT block, "Copyright 2012 Red Hat Inc.", the same permission and warranty text, and "Authors: Ben Skeggs"]
25#include <subdev/mc.h>
32 26
33#include <drm/drmP.h> 27#include "nouveau_drm.h"
34#include <drm/nouveau_drm.h> 28#include "nouveau_irq.h"
35#include "nouveau_drv.h" 29#include "nv50_display.h"
36#include "nouveau_reg.h"
37#include "nouveau_ramht.h"
38#include "nouveau_util.h"
39 30
40void 31void
41nouveau_irq_preinstall(struct drm_device *dev) 32nouveau_irq_preinstall(struct drm_device *dev)
42{ 33{
43 /* Master disable */ 34 nv_wr32(nouveau_dev(dev), 0x000140, 0x00000000);
44 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
45} 35}
46 36
47int 37int
48nouveau_irq_postinstall(struct drm_device *dev) 38nouveau_irq_postinstall(struct drm_device *dev)
49{ 39{
50 struct drm_nouveau_private *dev_priv = dev->dev_private; 40 nv_wr32(nouveau_dev(dev), 0x000140, 0x00000001);
51
52 /* Master enable */
53 nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
54 if (dev_priv->msi_enabled)
55 nv_wr08(dev, 0x00088068, 0xff);
56
57 return 0; 41 return 0;
58} 42}
59 43
60void 44void
61nouveau_irq_uninstall(struct drm_device *dev) 45nouveau_irq_uninstall(struct drm_device *dev)
62{ 46{
63 /* Master disable */ 47 nv_wr32(nouveau_dev(dev), 0x000140, 0x00000000);
64 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
65} 48}
66 49
67irqreturn_t 50irqreturn_t
68nouveau_irq_handler(DRM_IRQ_ARGS) 51nouveau_irq_handler(DRM_IRQ_ARGS)
69{ 52{
70 struct drm_device *dev = (struct drm_device *)arg; 53 struct drm_device *dev = arg;
71 struct drm_nouveau_private *dev_priv = dev->dev_private; 54 struct nouveau_device *device = nouveau_dev(dev);
72 unsigned long flags; 55 struct nouveau_mc *pmc = nouveau_mc(device);
73 u32 stat; 56 u32 stat;
74 int i;
75 57
76 stat = nv_rd32(dev, NV03_PMC_INTR_0); 58 stat = nv_rd32(device, 0x000100);
77 if (stat == 0 || stat == ~0) 59 if (stat == 0 || stat == ~0)
78 return IRQ_NONE; 60 return IRQ_NONE;
79 61
80 spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 62 nv_subdev(pmc)->intr(nv_subdev(pmc));
81 for (i = 0; i < 32 && stat; i++) {
82 if (!(stat & (1 << i)) || !dev_priv->irq_handler[i])
83 continue;
84 63
85 dev_priv->irq_handler[i](dev); 64 if (device->card_type >= NV_D0) {
86 stat &= ~(1 << i); 65 if (nv_rd32(device, 0x000100) & 0x04000000)
66 nvd0_display_intr(dev);
67 } else
68 if (device->card_type >= NV_50) {
69 if (nv_rd32(device, 0x000100) & 0x04000000)
70 nv50_display_intr(dev);
87 } 71 }
88 72
89 if (dev_priv->msi_enabled)
90 nv_wr08(dev, 0x00088068, 0xff);
91 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
92
93 if (stat && nouveau_ratelimit())
94 NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat);
95 return IRQ_HANDLED; 73 return IRQ_HANDLED;
96} 74}
97 75
98int 76int
99nouveau_irq_init(struct drm_device *dev) 77nouveau_irq_init(struct drm_device *dev)
100{ 78{
101 struct drm_nouveau_private *dev_priv = dev->dev_private;
102 int ret;
103
104 if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
105 ret = pci_enable_msi(dev->pdev);
106 if (ret == 0) {
107 NV_INFO(dev, "enabled MSI\n");
108 dev_priv->msi_enabled = true;
109 }
110 }
111
112 return drm_irq_install(dev); 79 return drm_irq_install(dev);
113} 80}
114 81
115void 82void
116nouveau_irq_fini(struct drm_device *dev) 83nouveau_irq_fini(struct drm_device *dev)
117{ 84{
118 struct drm_nouveau_private *dev_priv = dev->dev_private;
119
120 drm_irq_uninstall(dev); 85 drm_irq_uninstall(dev);
121 if (dev_priv->msi_enabled)
122 pci_disable_msi(dev->pdev);
123}
124
125void
126nouveau_irq_register(struct drm_device *dev, int status_bit,
127 void (*handler)(struct drm_device *))
128{
129 struct drm_nouveau_private *dev_priv = dev->dev_private;
130 unsigned long flags;
131
132 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
133 dev_priv->irq_handler[status_bit] = handler;
134 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
135}
136
137void
138nouveau_irq_unregister(struct drm_device *dev, int status_bit)
139{
140 struct drm_nouveau_private *dev_priv = dev->dev_private;
141 unsigned long flags;
142
143 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
144 dev_priv->irq_handler[status_bit] = NULL;
145 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
146} 86}
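One defensive check survives the rewrite: a PMC interrupt status of 0 means the (possibly shared) IRQ was not ours, while ~0 is what reads return once a device has hung or fallen off the bus, so both bail with IRQ_NONE before any dispatch. The dispatch itself shrinks from the old 32-slot handler table to a single call into the PMC subdev plus the display-interrupt special case.

/* Sketch of the spurious-IRQ guard, as in the new handler above. */
u32 stat = nv_rd32(device, 0x000100);	/* NV03_PMC_INTR_0 */
if (stat == 0 || stat == ~0)		/* not ours / device gone */
	return IRQ_NONE;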
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.h b/drivers/gpu/drm/nouveau/nouveau_irq.h
new file mode 100644
index 000000000000..06714ad857bb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.h
@@ -0,0 +1,11 @@
1#ifndef __NOUVEAU_IRQ_H__
2#define __NOUVEAU_IRQ_H__
3
4extern int nouveau_irq_init(struct drm_device *);
5extern void nouveau_irq_fini(struct drm_device *);
6extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
7extern void nouveau_irq_preinstall(struct drm_device *);
8extern int nouveau_irq_postinstall(struct drm_device *);
9extern void nouveau_irq_uninstall(struct drm_device *);
10
11#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 7f0afad13653..7e0ff10a2759 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -30,446 +30,10 @@
30 * Roy Spliet <r.spliet@student.tudelft.nl> 30 * Roy Spliet <r.spliet@student.tudelft.nl>
31 */ 31 */
32 32
33 33#include "nouveau_drm.h"
34#include <drm/drmP.h>
35
36#include "nouveau_drv.h"
37#include "nouveau_pm.h" 34#include "nouveau_pm.h"
38#include "nouveau_mm.h"
39#include "nouveau_vm.h"
40#include "nouveau_fifo.h"
41#include "nouveau_fence.h"
42
43/*
44 * NV10-NV40 tiling helpers
45 */
46
47static void
48nv10_mem_update_tile_region(struct drm_device *dev,
49 struct nouveau_tile_reg *tile, uint32_t addr,
50 uint32_t size, uint32_t pitch, uint32_t flags)
51{
52 struct drm_nouveau_private *dev_priv = dev->dev_private;
53 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
54 int i = tile - dev_priv->tile.reg, j;
55 unsigned long save;
56
57 nouveau_fence_unref(&tile->fence);
58
59 if (tile->pitch)
60 pfb->free_tile_region(dev, i);
61
62 if (pitch)
63 pfb->init_tile_region(dev, i, addr, size, pitch, flags);
64
65 spin_lock_irqsave(&dev_priv->context_switch_lock, save);
66 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
67 nv04_fifo_cache_pull(dev, false);
68
69 nouveau_wait_for_idle(dev);
70
71 pfb->set_tile_region(dev, i);
72 for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
73 if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
74 dev_priv->eng[j]->set_tile_region(dev, i);
75 }
76
77 nv04_fifo_cache_pull(dev, true);
78 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
79 spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
80}
81
82static struct nouveau_tile_reg *
83nv10_mem_get_tile_region(struct drm_device *dev, int i)
84{
85 struct drm_nouveau_private *dev_priv = dev->dev_private;
86 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
87
88 spin_lock(&dev_priv->tile.lock);
89
90 if (!tile->used &&
91 (!tile->fence || nouveau_fence_done(tile->fence)))
92 tile->used = true;
93 else
94 tile = NULL;
95
96 spin_unlock(&dev_priv->tile.lock);
97 return tile;
98}
99
100void
101nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
102 struct nouveau_fence *fence)
103{
104 struct drm_nouveau_private *dev_priv = dev->dev_private;
105
106 if (tile) {
107 spin_lock(&dev_priv->tile.lock);
108 if (fence) {
109 /* Mark it as pending. */
110 tile->fence = fence;
111 nouveau_fence_ref(fence);
112 }
113
114 tile->used = false;
115 spin_unlock(&dev_priv->tile.lock);
116 }
117}
118
119struct nouveau_tile_reg *
120nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
121 uint32_t pitch, uint32_t flags)
122{
123 struct drm_nouveau_private *dev_priv = dev->dev_private;
124 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
125 struct nouveau_tile_reg *tile, *found = NULL;
126 int i;
127
128 for (i = 0; i < pfb->num_tiles; i++) {
129 tile = nv10_mem_get_tile_region(dev, i);
130
131 if (pitch && !found) {
132 found = tile;
133 continue;
134
135 } else if (tile && tile->pitch) {
136 /* Kill an unused tile region. */
137 nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
138 }
139
140 nv10_mem_put_tile_region(dev, tile, NULL);
141 }
142
143 if (found)
144 nv10_mem_update_tile_region(dev, found, addr, size,
145 pitch, flags);
146 return found;
147}
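The tiling helpers being deleted here form a small lifecycle: nv10_mem_set_tiling() scans every slot, borrowing each free one through the get/put pair, recycles stale pitched regions, and programs the first free slot; release is deferred through a fence. A hedged sketch of the caller contract (addr/size/pitch/flags/fence stand in for real values):

/* Sketch of the caller contract for the helpers above. */
struct nouveau_tile_reg *tile =
	nv10_mem_set_tiling(dev, addr, size, pitch, flags);
if (tile) {
	/* ... render into the tiled range ... */
	nv10_mem_put_tile_region(dev, tile, fence); /* recycled once fence signals */
}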
148
149/*
150 * Cleanup everything
151 */
152void
153nouveau_mem_vram_fini(struct drm_device *dev)
154{
155 struct drm_nouveau_private *dev_priv = dev->dev_private;
156
157 ttm_bo_device_release(&dev_priv->ttm.bdev);
158
159 nouveau_ttm_global_release(dev_priv);
160
161 if (dev_priv->fb_mtrr >= 0) {
162 drm_mtrr_del(dev_priv->fb_mtrr,
163 pci_resource_start(dev->pdev, 1),
164 pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
165 dev_priv->fb_mtrr = -1;
166 }
167}
168
169void
170nouveau_mem_gart_fini(struct drm_device *dev)
171{
172 nouveau_sgdma_takedown(dev);
173
174 if (drm_core_has_AGP(dev) && dev->agp) {
175 struct drm_agp_mem *entry, *tempe;
176
177 /* Remove AGP resources, but leave dev->agp
178 intact until drv_cleanup is called. */
179 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
180 if (entry->bound)
181 drm_unbind_agp(entry->memory);
182 drm_free_agp(entry->memory, entry->pages);
183 kfree(entry);
184 }
185 INIT_LIST_HEAD(&dev->agp->memory);
186 35
187 if (dev->agp->acquired) 36#include <subdev/fb.h>
188 drm_agp_release(dev);
189
190 dev->agp->acquired = 0;
191 dev->agp->enabled = 0;
192 }
193}
194
195bool
196nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
197{
198 if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
199 return true;
200
201 return false;
202}
203
204#if __OS_HAS_AGP
205static unsigned long
206get_agp_mode(struct drm_device *dev, unsigned long mode)
207{
208 struct drm_nouveau_private *dev_priv = dev->dev_private;
209
210 /*
211 * FW seems to be broken on nv18, it makes the card lock up
212 * randomly.
213 */
214 if (dev_priv->chipset == 0x18)
215 mode &= ~PCI_AGP_COMMAND_FW;
216
217 /*
218 * AGP mode set in the command line.
219 */
220 if (nouveau_agpmode > 0) {
221 bool agpv3 = mode & 0x8;
222 int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
223
224 mode = (mode & ~0x7) | (rate & 0x7);
225 }
226
227 return mode;
228}
229#endif
230
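In get_agp_mode() the low three bits of the AGP mode word carry the rate
field, and bit 3 flags AGP 3.0, whose rates are expressed in units of 4x;
that is why a nouveau_agpmode= request of 8 is divided by four before being
merged back in. A stand-alone sketch of the same arithmetic (illustrative
values only, runnable in user space):

    #include <stdio.h>

    /* mirrors the rate fix-up done by get_agp_mode() above */
    static unsigned long fixup_agp_rate(unsigned long mode, int agpmode)
    {
            int agpv3 = mode & 0x8;                       /* AGP 3.0 flag */
            int rate = agpv3 ? agpmode / 4 : agpmode;     /* v3: 4x units */

            if (agpmode > 0)
                    mode = (mode & ~0x7UL) | (rate & 0x7);
            return mode;
    }

    int main(void)
    {
            /* v3 bridge, user asked for 8x: rate field becomes 8/4 = 2 */
            printf("%#lx\n", fixup_agp_rate(0x1f000a0b, 8)); /* 0x1f000a0a */
            return 0;
    }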
231int
232nouveau_mem_reset_agp(struct drm_device *dev)
233{
234#if __OS_HAS_AGP
235 uint32_t saved_pci_nv_1, pmc_enable;
236 int ret;
237
238 /* First of all, disable fast writes, otherwise if it's
239 * already enabled in the AGP bridge and we disable the card's
240 * AGP controller we might be locking ourselves out of it. */
241 if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
242 dev->agp->mode) & PCI_AGP_COMMAND_FW) {
243 struct drm_agp_info info;
244 struct drm_agp_mode mode;
245
246 ret = drm_agp_info(dev, &info);
247 if (ret)
248 return ret;
249
250 mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
251 ret = drm_agp_enable(dev, mode);
252 if (ret)
253 return ret;
254 }
255
256 saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
257
258 /* clear busmaster bit */
259 nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
260 /* disable AGP */
261 nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);
262
263 /* power cycle pgraph, if enabled */
264 pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
265 if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
266 nv_wr32(dev, NV03_PMC_ENABLE,
267 pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
268 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
269 NV_PMC_ENABLE_PGRAPH);
270 }
271
272 /* and restore (gives effect of resetting AGP) */
273 nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
274#endif
275
276 return 0;
277}
278
279int
280nouveau_mem_init_agp(struct drm_device *dev)
281{
282#if __OS_HAS_AGP
283 struct drm_nouveau_private *dev_priv = dev->dev_private;
284 struct drm_agp_info info;
285 struct drm_agp_mode mode;
286 int ret;
287
288 if (!dev->agp->acquired) {
289 ret = drm_agp_acquire(dev);
290 if (ret) {
291 NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
292 return ret;
293 }
294 }
295
296 nouveau_mem_reset_agp(dev);
297
298 ret = drm_agp_info(dev, &info);
299 if (ret) {
300 NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
301 return ret;
302 }
303
304 /* see agp.h for the AGPSTAT_* modes available */
305 mode.mode = get_agp_mode(dev, info.mode);
306 ret = drm_agp_enable(dev, mode);
307 if (ret) {
308 NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
309 return ret;
310 }
311
312 dev_priv->gart_info.type = NOUVEAU_GART_AGP;
313 dev_priv->gart_info.aper_base = info.aperture_base;
314 dev_priv->gart_info.aper_size = info.aperture_size;
315#endif
316 return 0;
317}
318
319static const struct vram_types {
320 int value;
321 const char *name;
322} vram_type_map[] = {
323 { NV_MEM_TYPE_STOLEN , "stolen system memory" },
324 { NV_MEM_TYPE_SGRAM , "SGRAM" },
325 { NV_MEM_TYPE_SDRAM , "SDRAM" },
326 { NV_MEM_TYPE_DDR1 , "DDR1" },
327 { NV_MEM_TYPE_DDR2 , "DDR2" },
328 { NV_MEM_TYPE_DDR3 , "DDR3" },
329 { NV_MEM_TYPE_GDDR2 , "GDDR2" },
330 { NV_MEM_TYPE_GDDR3 , "GDDR3" },
331 { NV_MEM_TYPE_GDDR4 , "GDDR4" },
332 { NV_MEM_TYPE_GDDR5 , "GDDR5" },
333 { NV_MEM_TYPE_UNKNOWN, "unknown type" }
334};
335
336int
337nouveau_mem_vram_init(struct drm_device *dev)
338{
339 struct drm_nouveau_private *dev_priv = dev->dev_private;
340 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
341 const struct vram_types *vram_type;
342 int ret, dma_bits;
343
344 dma_bits = 32;
345 if (dev_priv->card_type >= NV_50) {
346 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
347 dma_bits = 40;
348 } else
349 if (0 && pci_is_pcie(dev->pdev) &&
350 dev_priv->chipset > 0x40 &&
351 dev_priv->chipset != 0x45) {
352 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
353 dma_bits = 39;
354 }
355
356 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
357 if (ret)
358 return ret;
359 ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
360 if (ret) {
361 /* Reset to default value. */
362 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
363 }
364
365
366 ret = nouveau_ttm_global_init(dev_priv);
367 if (ret)
368 return ret;
369
370 ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
371 dev_priv->ttm.bo_global_ref.ref.object,
372 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
373 dma_bits <= 32 ? true : false);
374 if (ret) {
375 NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
376 return ret;
377 }
378
379 vram_type = vram_type_map;
380 while (vram_type->value != NV_MEM_TYPE_UNKNOWN) {
381 if (nouveau_vram_type) {
382 if (!strcasecmp(nouveau_vram_type, vram_type->name)) {
383 dev_priv->vram_type = vram_type->value;
384 break;
385 }
386 } else if (vram_type->value == dev_priv->vram_type) {
387 break;
388 }
389 vram_type++;
390 }
391
392 NV_INFO(dev, "Detected %dMiB VRAM (%s)\n",
393 (int)(dev_priv->vram_size >> 20), vram_type->name);
394 if (dev_priv->vram_sys_base) {
395 NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
396 dev_priv->vram_sys_base);
397 }
398
399 dev_priv->fb_available_size = dev_priv->vram_size;
400 dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
401 if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
402 dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
403 dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
404
405 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
406 dev_priv->fb_aper_free = dev_priv->fb_available_size;
407
408 /* mappable vram */
409 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
410 dev_priv->fb_available_size >> PAGE_SHIFT);
411 if (ret) {
412 NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
413 return ret;
414 }
415
416 if (dev_priv->card_type < NV_50) {
417 ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
418 0, 0, NULL, &dev_priv->vga_ram);
419 if (ret == 0)
420 ret = nouveau_bo_pin(dev_priv->vga_ram,
421 TTM_PL_FLAG_VRAM);
422
423 if (ret) {
424 NV_WARN(dev, "failed to reserve VGA memory\n");
425 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
426 }
427 }
428
429 dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
430 pci_resource_len(dev->pdev, 1),
431 DRM_MTRR_WC);
432 return 0;
433}
434
435int
436nouveau_mem_gart_init(struct drm_device *dev)
437{
438 struct drm_nouveau_private *dev_priv = dev->dev_private;
439 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
440 int ret;
441
442 dev_priv->gart_info.type = NOUVEAU_GART_NONE;
443
444#if !defined(__powerpc__) && !defined(__ia64__)
445 if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
446 ret = nouveau_mem_init_agp(dev);
447 if (ret)
448 NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
449 }
450#endif
451
452 if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
453 ret = nouveau_sgdma_init(dev);
454 if (ret) {
455 NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
456 return ret;
457 }
458 }
459
460 NV_INFO(dev, "%d MiB GART (aperture)\n",
461 (int)(dev_priv->gart_info.aper_size >> 20));
462 dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;
463
464 ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
465 dev_priv->gart_info.aper_size >> PAGE_SHIFT);
466 if (ret) {
467 NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
468 return ret;
469 }
470
471 return 0;
472}
473 37
474static int 38static int
475nv40_mem_timing_calc(struct drm_device *dev, u32 freq, 39nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
@@ -477,6 +41,8 @@ nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
477 struct nouveau_pm_memtiming *boot, 41 struct nouveau_pm_memtiming *boot,
478 struct nouveau_pm_memtiming *t) 42 struct nouveau_pm_memtiming *t)
479{ 43{
44 struct nouveau_drm *drm = nouveau_drm(dev);
45
480 t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC); 46 t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
481 47
482 /* XXX: I don't trust the -1's and +1's... they must come 48 /* XXX: I don't trust the -1's and +1's... they must come
@@ -492,7 +58,7 @@ nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
492 e->tRCDWR << 8 | 58 e->tRCDWR << 8 |
493 e->tRCDRD); 59 e->tRCDRD);
494 60
495 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id, 61 NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x\n", t->id,
496 t->reg[0], t->reg[1], t->reg[2]); 62 t->reg[0], t->reg[1], t->reg[2]);
497 return 0; 63 return 0;
498} 64}
@@ -503,7 +69,9 @@ nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
503 struct nouveau_pm_memtiming *boot, 69 struct nouveau_pm_memtiming *boot,
504 struct nouveau_pm_memtiming *t) 70 struct nouveau_pm_memtiming *t)
505{ 71{
506 struct drm_nouveau_private *dev_priv = dev->dev_private; 72 struct nouveau_device *device = nouveau_dev(dev);
73 struct nouveau_fb *pfb = nouveau_fb(device);
74 struct nouveau_drm *drm = nouveau_drm(dev);
507 struct bit_entry P; 75 struct bit_entry P;
508 uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3; 76 uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;
509 77
@@ -557,7 +125,7 @@ nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
557 t->reg[7] = 0x4000202 | (e->tCL - 1) << 16; 125 t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;
558 126
559 /* XXX: P.version == 1 only has DDR2 and GDDR3? */ 127 /* XXX: P.version == 1 only has DDR2 and GDDR3? */
560 if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) { 128 if (pfb->ram.type == NV_MEM_TYPE_DDR2) {
561 t->reg[5] |= (e->tCL + 3) << 8; 129 t->reg[5] |= (e->tCL + 3) << 8;
562 t->reg[6] |= (t->tCWL - 2) << 8; 130 t->reg[6] |= (t->tCWL - 2) << 8;
563 t->reg[8] |= (e->tCL - 4); 131 t->reg[8] |= (e->tCL - 4);
@@ -590,11 +158,11 @@ nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
590 0x202; 158 0x202;
591 } 159 }
592 160
593 NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id, 161 NV_DEBUG(drm, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
594 t->reg[0], t->reg[1], t->reg[2], t->reg[3]); 162 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
595 NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n", 163 NV_DEBUG(drm, " 230: %08x %08x %08x %08x\n",
596 t->reg[4], t->reg[5], t->reg[6], t->reg[7]); 164 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
597 NV_DEBUG(dev, " 240: %08x\n", t->reg[8]); 165 NV_DEBUG(drm, " 240: %08x\n", t->reg[8]);
598 return 0; 166 return 0;
599} 167}
600 168
@@ -604,6 +172,8 @@ nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
604 struct nouveau_pm_memtiming *boot, 172 struct nouveau_pm_memtiming *boot,
605 struct nouveau_pm_memtiming *t) 173 struct nouveau_pm_memtiming *t)
606{ 174{
175 struct nouveau_drm *drm = nouveau_drm(dev);
176
607 if (e->tCWL > 0) 177 if (e->tCWL > 0)
608 t->tCWL = e->tCWL; 178 t->tCWL = e->tCWL;
609 179
@@ -626,9 +196,9 @@ nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
626 t->reg[4] = (boot->reg[4] & 0xfff00fff) | 196 t->reg[4] = (boot->reg[4] & 0xfff00fff) |
627 (e->tRRD&0x1f) << 15; 197 (e->tRRD&0x1f) << 15;
628 198
629 NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id, 199 NV_DEBUG(drm, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
630 t->reg[0], t->reg[1], t->reg[2], t->reg[3]); 200 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
631 NV_DEBUG(dev, " 2a0: %08x\n", t->reg[4]); 201 NV_DEBUG(drm, " 2a0: %08x\n", t->reg[4]);
632 return 0; 202 return 0;
633} 203}
634 204
@@ -642,6 +212,8 @@ nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
642 struct nouveau_pm_memtiming *boot, 212 struct nouveau_pm_memtiming *boot,
643 struct nouveau_pm_memtiming *t) 213 struct nouveau_pm_memtiming *t)
644{ 214{
215 struct nouveau_drm *drm = nouveau_drm(dev);
216
645 t->drive_strength = 0; 217 t->drive_strength = 0;
646 if (len < 15) { 218 if (len < 15) {
647 t->odt = boot->odt; 219 t->odt = boot->odt;
@@ -650,17 +222,17 @@ nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
650 } 222 }
651 223
652 if (e->tCL >= NV_MEM_CL_DDR2_MAX) { 224 if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
653 NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL); 225 NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
654 return -ERANGE; 226 return -ERANGE;
655 } 227 }
656 228
657 if (e->tWR >= NV_MEM_WR_DDR2_MAX) { 229 if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
658 NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR); 230 NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
659 return -ERANGE; 231 return -ERANGE;
660 } 232 }
661 233
662 if (t->odt > 3) { 234 if (t->odt > 3) {
663 NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x", 235 NV_WARN(drm, "(%u) Invalid odt value, assuming disabled: %x",
664 t->id, t->odt); 236 t->id, t->odt);
665 t->odt = 0; 237 t->odt = 0;
666 } 238 }
@@ -672,11 +244,11 @@ nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
672 (t->odt & 0x1) << 2 | 244 (t->odt & 0x1) << 2 |
673 (t->odt & 0x2) << 5; 245 (t->odt & 0x2) << 5;
674 246
675 NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]); 247 NV_DEBUG(drm, "(%u) MR: %08x", t->id, t->mr[0]);
676 return 0; 248 return 0;
677} 249}
678 250
679uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = { 251static const uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
680 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0}; 252 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
681 253
682static int 254static int
@@ -685,6 +257,7 @@ nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
685 struct nouveau_pm_memtiming *boot, 257 struct nouveau_pm_memtiming *boot,
686 struct nouveau_pm_memtiming *t) 258 struct nouveau_pm_memtiming *t)
687{ 259{
260 struct nouveau_drm *drm = nouveau_drm(dev);
688 u8 cl = e->tCL - 4; 261 u8 cl = e->tCL - 4;
689 262
690 t->drive_strength = 0; 263 t->drive_strength = 0;
@@ -695,17 +268,17 @@ nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
695 } 268 }
696 269
697 if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) { 270 if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
698 NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL); 271 NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
699 return -ERANGE; 272 return -ERANGE;
700 } 273 }
701 274
702 if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) { 275 if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
703 NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR); 276 NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
704 return -ERANGE; 277 return -ERANGE;
705 } 278 }
706 279
707 if (e->tCWL < 5) { 280 if (e->tCWL < 5) {
708 NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL); 281 NV_WARN(drm, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
709 return -ERANGE; 282 return -ERANGE;
710 } 283 }
711 284
@@ -720,13 +293,13 @@ nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
720 (t->odt & 0x4) << 7; 293 (t->odt & 0x4) << 7;
721 t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3; 294 t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;
722 295
723 NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]); 296 NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
724 return 0; 297 return 0;
725} 298}
726 299
727uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = { 300static const uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
728 0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11}; 301 0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
729uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = { 302static const uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
730 0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3}; 303 0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
731 304
732static int 305static int
@@ -735,6 +308,8 @@ nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
735 struct nouveau_pm_memtiming *boot, 308 struct nouveau_pm_memtiming *boot,
736 struct nouveau_pm_memtiming *t) 309 struct nouveau_pm_memtiming *t)
737{ 310{
311 struct nouveau_drm *drm = nouveau_drm(dev);
312
738 if (len < 15) { 313 if (len < 15) {
739 t->drive_strength = boot->drive_strength; 314 t->drive_strength = boot->drive_strength;
740 t->odt = boot->odt; 315 t->odt = boot->odt;
@@ -744,17 +319,17 @@ nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
744 } 319 }
745 320
746 if (e->tCL >= NV_MEM_CL_GDDR3_MAX) { 321 if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
747 NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL); 322 NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
748 return -ERANGE; 323 return -ERANGE;
749 } 324 }
750 325
751 if (e->tWR >= NV_MEM_WR_GDDR3_MAX) { 326 if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
752 NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR); 327 NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
753 return -ERANGE; 328 return -ERANGE;
754 } 329 }
755 330
756 if (t->odt > 3) { 331 if (t->odt > 3) {
757 NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x", 332 NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
758 t->id, t->odt); 333 t->id, t->odt);
759 t->odt = 0; 334 t->odt = 0;
760 } 335 }
@@ -768,7 +343,7 @@ nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
768 (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4; 343 (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
769 t->mr[2] = boot->mr[2]; 344 t->mr[2] = boot->mr[2];
770 345
771 NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id, 346 NV_DEBUG(drm, "(%u) MR: %08x %08x %08x", t->id,
772 t->mr[0], t->mr[1], t->mr[2]); 347 t->mr[0], t->mr[1], t->mr[2]);
773 return 0; 348 return 0;
774} 349}
@@ -779,6 +354,8 @@ nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
779 struct nouveau_pm_memtiming *boot, 354 struct nouveau_pm_memtiming *boot,
780 struct nouveau_pm_memtiming *t) 355 struct nouveau_pm_memtiming *t)
781{ 356{
357 struct nouveau_drm *drm = nouveau_drm(dev);
358
782 if (len < 15) { 359 if (len < 15) {
783 t->drive_strength = boot->drive_strength; 360 t->drive_strength = boot->drive_strength;
784 t->odt = boot->odt; 361 t->odt = boot->odt;
@@ -788,17 +365,17 @@ nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
788 } 365 }
789 366
790 if (e->tCL >= NV_MEM_CL_GDDR5_MAX) { 367 if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
791 NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL); 368 NV_WARN(drm, "(%u) Invalid tCL: %u", t->id, e->tCL);
792 return -ERANGE; 369 return -ERANGE;
793 } 370 }
794 371
795 if (e->tWR >= NV_MEM_WR_GDDR5_MAX) { 372 if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
796 NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR); 373 NV_WARN(drm, "(%u) Invalid tWR: %u", t->id, e->tWR);
797 return -ERANGE; 374 return -ERANGE;
798 } 375 }
799 376
800 if (t->odt > 3) { 377 if (t->odt > 3) {
801 NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x", 378 NV_WARN(drm, "(%u) Invalid odt value, assuming autocal: %x",
802 t->id, t->odt); 379 t->id, t->odt);
803 t->odt = 0; 380 t->odt = 0;
804 } 381 }
@@ -810,7 +387,7 @@ nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
810 t->drive_strength | 387 t->drive_strength |
811 (t->odt << 2); 388 (t->odt << 2);
812 389
813 NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]); 390 NV_DEBUG(drm, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
814 return 0; 391 return 0;
815} 392}
816 393
@@ -818,8 +395,9 @@ int
818nouveau_mem_timing_calc(struct drm_device *dev, u32 freq, 395nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
819 struct nouveau_pm_memtiming *t) 396 struct nouveau_pm_memtiming *t)
820{ 397{
821 struct drm_nouveau_private *dev_priv = dev->dev_private; 398 struct nouveau_device *device = nouveau_dev(dev);
822 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 399 struct nouveau_fb *pfb = nouveau_fb(device);
400 struct nouveau_pm *pm = nouveau_pm(dev);
823 struct nouveau_pm_memtiming *boot = &pm->boot.timing; 401 struct nouveau_pm_memtiming *boot = &pm->boot.timing;
824 struct nouveau_pm_tbl_entry *e; 402 struct nouveau_pm_tbl_entry *e;
825 u8 ver, len, *ptr, *ramcfg; 403 u8 ver, len, *ptr, *ramcfg;
@@ -834,7 +412,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
834 412
835 t->tCWL = boot->tCWL; 413 t->tCWL = boot->tCWL;
836 414
837 switch (dev_priv->card_type) { 415 switch (device->card_type) {
838 case NV_40: 416 case NV_40:
839 ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t); 417 ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
840 break; 418 break;
@@ -850,7 +428,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
850 break; 428 break;
851 } 429 }
852 430
853 switch (dev_priv->vram_type * !ret) { 431 switch (pfb->ram.type * !ret) {
854 case NV_MEM_TYPE_GDDR3: 432 case NV_MEM_TYPE_GDDR3:
855 ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t); 433 ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
856 break; 434 break;
@@ -877,7 +455,7 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
877 else 455 else
878 dll_off = !!(ramcfg[2] & 0x40); 456 dll_off = !!(ramcfg[2] & 0x40);
879 457
880 switch (dev_priv->vram_type) { 458 switch (pfb->ram.type) {
881 case NV_MEM_TYPE_GDDR3: 459 case NV_MEM_TYPE_GDDR3:
882 t->mr[1] &= ~0x00000040; 460 t->mr[1] &= ~0x00000040;
883 t->mr[1] |= 0x00000040 * dll_off; 461 t->mr[1] |= 0x00000040 * dll_off;
@@ -895,11 +473,12 @@ nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
895void 473void
896nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t) 474nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
897{ 475{
898 struct drm_nouveau_private *dev_priv = dev->dev_private; 476 struct nouveau_device *device = nouveau_dev(dev);
477 struct nouveau_fb *pfb = nouveau_fb(device);
899 u32 timing_base, timing_regs, mr_base; 478 u32 timing_base, timing_regs, mr_base;
900 int i; 479 int i;
901 480
902 if (dev_priv->card_type >= 0xC0) { 481 if (device->card_type >= 0xC0) {
903 timing_base = 0x10f290; 482 timing_base = 0x10f290;
904 mr_base = 0x10f300; 483 mr_base = 0x10f300;
905 } else { 484 } else {
@@ -909,7 +488,7 @@ nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
909 488
910 t->id = -1; 489 t->id = -1;
911 490
912 switch (dev_priv->card_type) { 491 switch (device->card_type) {
913 case NV_50: 492 case NV_50:
914 timing_regs = 9; 493 timing_regs = 9;
915 break; 494 break;
@@ -926,24 +505,24 @@ nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
926 return; 505 return;
927 } 506 }
928 for (i = 0; i < timing_regs; i++) 507 for (i = 0; i < timing_regs; i++)
929 t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i)); 508 t->reg[i] = nv_rd32(device, timing_base + (0x04 * i));
930 509
931 t->tCWL = 0; 510 t->tCWL = 0;
932 if (dev_priv->card_type < NV_C0) { 511 if (device->card_type < NV_C0) {
933 t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1; 512 t->tCWL = ((nv_rd32(device, 0x100228) & 0x0f000000) >> 24) + 1;
934 } else if (dev_priv->card_type <= NV_D0) { 513 } else if (device->card_type <= NV_D0) {
935 t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7); 514 t->tCWL = ((nv_rd32(device, 0x10f294) & 0x00000f80) >> 7);
936 } 515 }
937 516
938 t->mr[0] = nv_rd32(dev, mr_base); 517 t->mr[0] = nv_rd32(device, mr_base);
939 t->mr[1] = nv_rd32(dev, mr_base + 0x04); 518 t->mr[1] = nv_rd32(device, mr_base + 0x04);
940 t->mr[2] = nv_rd32(dev, mr_base + 0x20); 519 t->mr[2] = nv_rd32(device, mr_base + 0x20);
941 t->mr[3] = nv_rd32(dev, mr_base + 0x24); 520 t->mr[3] = nv_rd32(device, mr_base + 0x24);
942 521
943 t->odt = 0; 522 t->odt = 0;
944 t->drive_strength = 0; 523 t->drive_strength = 0;
945 524
946 switch (dev_priv->vram_type) { 525 switch (pfb->ram.type) {
947 case NV_MEM_TYPE_DDR3: 526 case NV_MEM_TYPE_DDR3:
948 t->odt |= (t->mr[1] & 0x200) >> 7; 527 t->odt |= (t->mr[1] & 0x200) >> 7;
949 case NV_MEM_TYPE_DDR2: 528 case NV_MEM_TYPE_DDR2:
@@ -964,13 +543,15 @@ int
964nouveau_mem_exec(struct nouveau_mem_exec_func *exec, 543nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
965 struct nouveau_pm_level *perflvl) 544 struct nouveau_pm_level *perflvl)
966{ 545{
967 struct drm_nouveau_private *dev_priv = exec->dev->dev_private; 546 struct nouveau_drm *drm = nouveau_drm(exec->dev);
547 struct nouveau_device *device = nouveau_dev(exec->dev);
548 struct nouveau_fb *pfb = nouveau_fb(device);
968 struct nouveau_pm_memtiming *info = &perflvl->timing; 549 struct nouveau_pm_memtiming *info = &perflvl->timing;
969 u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0; 550 u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
970 u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] }; 551 u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
971 u32 mr1_dlloff; 552 u32 mr1_dlloff;
972 553
973 switch (dev_priv->vram_type) { 554 switch (pfb->ram.type) {
974 case NV_MEM_TYPE_DDR2: 555 case NV_MEM_TYPE_DDR2:
975 tDLLK = 2000; 556 tDLLK = 2000;
976 mr1_dlloff = 0x00000001; 557 mr1_dlloff = 0x00000001;
@@ -986,12 +567,12 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
986 mr1_dlloff = 0x00000040; 567 mr1_dlloff = 0x00000040;
987 break; 568 break;
988 default: 569 default:
989 NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n"); 570 NV_ERROR(drm, "cannot reclock unsupported memtype\n");
990 return -ENODEV; 571 return -ENODEV;
991 } 572 }
992 573
993 /* fetch current MRs */ 574 /* fetch current MRs */
994 switch (dev_priv->vram_type) { 575 switch (pfb->ram.type) {
995 case NV_MEM_TYPE_GDDR3: 576 case NV_MEM_TYPE_GDDR3:
996 case NV_MEM_TYPE_DDR3: 577 case NV_MEM_TYPE_DDR3:
997 mr[2] = exec->mrg(exec, 2); 578 mr[2] = exec->mrg(exec, 2);
@@ -1058,194 +639,9 @@ nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
1058 exec->mrs (exec, 0, info->mr[0] | 0x00000000); 639 exec->mrs (exec, 0, info->mr[0] | 0x00000000);
1059 exec->wait(exec, tMRD); 640 exec->wait(exec, tMRD);
1060 exec->wait(exec, tDLLK); 641 exec->wait(exec, tDLLK);
1061 if (dev_priv->vram_type == NV_MEM_TYPE_GDDR3) 642 if (pfb->ram.type == NV_MEM_TYPE_GDDR3)
1062 exec->precharge(exec); 643 exec->precharge(exec);
1063 } 644 }
1064 645
1065 return 0; 646 return 0;
1066} 647}
1067
1068int
1069nouveau_mem_vbios_type(struct drm_device *dev)
1070{
1071 struct bit_entry M;
1072 u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
1073 if (!bit_table(dev, 'M', &M) && M.version == 2 && M.length >= 5) {
1074 u8 *table = ROMPTR(dev, M.data[3]);
1075 if (table && table[0] == 0x10 && ramcfg < table[3]) {
1076 u8 *entry = table + table[1] + (ramcfg * table[2]);
1077 switch (entry[0] & 0x0f) {
1078 case 0: return NV_MEM_TYPE_DDR2;
1079 case 1: return NV_MEM_TYPE_DDR3;
1080 case 2: return NV_MEM_TYPE_GDDR3;
1081 case 3: return NV_MEM_TYPE_GDDR5;
1082 default:
1083 break;
1084 }
1085
1086 }
1087 }
1088 return NV_MEM_TYPE_UNKNOWN;
1089}
1090
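nouveau_mem_vbios_type() keys its table lookup off the ramcfg strap: bits
5:2 of the register at 0x101000 (the strap register, PEXTDEV_BOOT_0) encode
which of the vbios memory-table entries applies to this board. A
stand-alone decode with a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    /* same mask and shift as in nouveau_mem_vbios_type() above */
    static uint8_t ramcfg_from_strap(uint32_t boot0)
    {
            return (boot0 & 0x0000003c) >> 2;
    }

    int main(void)
    {
            printf("%u\n", (unsigned)ramcfg_from_strap(0x12345678)); /* 14 */
            return 0;
    }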
1091static int
1092nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
1093{
1094 /* nothing to do */
1095 return 0;
1096}
1097
1098static int
1099nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
1100{
1101 /* nothing to do */
1102 return 0;
1103}
1104
1105static inline void
1106nouveau_mem_node_cleanup(struct nouveau_mem *node)
1107{
1108 if (node->vma[0].node) {
1109 nouveau_vm_unmap(&node->vma[0]);
1110 nouveau_vm_put(&node->vma[0]);
1111 }
1112
1113 if (node->vma[1].node) {
1114 nouveau_vm_unmap(&node->vma[1]);
1115 nouveau_vm_put(&node->vma[1]);
1116 }
1117}
1118
1119static void
1120nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
1121 struct ttm_mem_reg *mem)
1122{
1123 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
1124 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
1125 struct drm_device *dev = dev_priv->dev;
1126
1127 nouveau_mem_node_cleanup(mem->mm_node);
1128 vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
1129}
1130
1131static int
1132nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
1133 struct ttm_buffer_object *bo,
1134 struct ttm_placement *placement,
1135 struct ttm_mem_reg *mem)
1136{
1137 struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
1138 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
1139 struct drm_device *dev = dev_priv->dev;
1140 struct nouveau_bo *nvbo = nouveau_bo(bo);
1141 struct nouveau_mem *node;
1142 u32 size_nc = 0;
1143 int ret;
1144
1145 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
1146 size_nc = 1 << nvbo->page_shift;
1147
1148 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
1149 mem->page_alignment << PAGE_SHIFT, size_nc,
1150 (nvbo->tile_flags >> 8) & 0x3ff, &node);
1151 if (ret) {
1152 mem->mm_node = NULL;
1153 return (ret == -ENOSPC) ? 0 : ret;
1154 }
1155
1156 node->page_shift = nvbo->page_shift;
1157
1158 mem->mm_node = node;
1159 mem->start = node->offset >> PAGE_SHIFT;
1160 return 0;
1161}
1162
1163void
1164nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
1165{
1166 struct nouveau_mm *mm = man->priv;
1167 struct nouveau_mm_node *r;
1168 u32 total = 0, free = 0;
1169
1170 mutex_lock(&mm->mutex);
1171 list_for_each_entry(r, &mm->nodes, nl_entry) {
1172 printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
1173 prefix, r->type, ((u64)r->offset << 12),
1174 (((u64)r->offset + r->length) << 12));
1175
1176 total += r->length;
1177 if (!r->type)
1178 free += r->length;
1179 }
1180 mutex_unlock(&mm->mutex);
1181
1182 printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
1183 prefix, (u64)total << 12, (u64)free << 12);
1184 printk(KERN_DEBUG "%s block: 0x%08x\n",
1185 prefix, mm->block_size << 12);
1186}
1187
1188const struct ttm_mem_type_manager_func nouveau_vram_manager = {
1189 nouveau_vram_manager_init,
1190 nouveau_vram_manager_fini,
1191 nouveau_vram_manager_new,
1192 nouveau_vram_manager_del,
1193 nouveau_vram_manager_debug
1194};
1195
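The table above relies on positional initialization, so its five entries
must stay in the order the 3.x-era ttm_mem_type_manager_func declares them:
init, takedown, get_node, put_node, debug. A hedged sketch of how such a
table then gets wired up from the driver's init_mem_type() hook (field and
hook names follow that TTM version; this is not a verbatim copy of
nouveau_bo.c):

    static int
    example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                          struct ttm_mem_type_manager *man)
    {
            if (type == TTM_PL_VRAM) {
                    /* TTM now routes VRAM allocations through the table above */
                    man->func = &nouveau_vram_manager;
                    man->flags = TTM_MEMTYPE_FLAG_FIXED |
                                 TTM_MEMTYPE_FLAG_MAPPABLE;
            }
            return 0;
    }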
1196static int
1197nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
1198{
1199 return 0;
1200}
1201
1202static int
1203nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
1204{
1205 return 0;
1206}
1207
1208static void
1209nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
1210 struct ttm_mem_reg *mem)
1211{
1212 nouveau_mem_node_cleanup(mem->mm_node);
1213 kfree(mem->mm_node);
1214 mem->mm_node = NULL;
1215}
1216
1217static int
1218nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
1219 struct ttm_buffer_object *bo,
1220 struct ttm_placement *placement,
1221 struct ttm_mem_reg *mem)
1222{
1223 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
1224 struct nouveau_mem *node;
1225
1226 if (unlikely((mem->num_pages << PAGE_SHIFT) >=
1227 dev_priv->gart_info.aper_size))
1228 return -ENOMEM;
1229
1230 node = kzalloc(sizeof(*node), GFP_KERNEL);
1231 if (!node)
1232 return -ENOMEM;
1233 node->page_shift = 12;
1234
1235 mem->mm_node = node;
1236 mem->start = 0;
1237 return 0;
1238}
1239
1240void
1241nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
1242{
1243}
1244
1245const struct ttm_mem_type_manager_func nouveau_gart_manager = {
1246 nouveau_gart_manager_init,
1247 nouveau_gart_manager_fini,
1248 nouveau_gart_manager_new,
1249 nouveau_gart_manager_del,
1250 nouveau_gart_manager_debug
1251};
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
deleted file mode 100644
index 57a600c35c95..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NOUVEAU_REGION_H__
26#define __NOUVEAU_REGION_H__
27
28struct nouveau_mm_node {
29 struct list_head nl_entry;
30 struct list_head fl_entry;
31 struct list_head rl_entry;
32
33 u8 type;
34 u32 offset;
35 u32 length;
36};
37
38struct nouveau_mm {
39 struct list_head nodes;
40 struct list_head free;
41
42 struct mutex mutex;
43
44 u32 block_size;
45 int heap_nodes;
46};
47
48int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
49int nouveau_mm_fini(struct nouveau_mm *);
50int nouveau_mm_pre(struct nouveau_mm *);
51int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
52 u32 align, struct nouveau_mm_node **);
53void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
54
55int nv50_vram_init(struct drm_device *);
56void nv50_vram_fini(struct drm_device *);
57int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
58 u32 memtype, struct nouveau_mem **);
59void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
60bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
61
62int nvc0_vram_init(struct drm_device *);
63int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
64 u32 memtype, struct nouveau_mem **);
65bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
66
67#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c
deleted file mode 100644
index d07f4a3310b9..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_mxm.c
+++ /dev/null
@@ -1,723 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/acpi.h>
26
27#include <drm/drmP.h>
28#include "nouveau_drv.h"
29
30#define MXM_DBG(dev, fmt, args...) NV_DEBUG((dev), "MXM: " fmt, ##args)
31#define MXM_MSG(dev, fmt, args...) NV_INFO((dev), "MXM: " fmt, ##args)
32
33static u8 *
34mxms_data(struct drm_device *dev)
35{
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 return dev_priv->mxms;
38
39}
40
41static u16
42mxms_version(struct drm_device *dev)
43{
44 u8 *mxms = mxms_data(dev);
45 u16 version = (mxms[4] << 8) | mxms[5];
46 switch (version) {
47 case 0x0200:
48 case 0x0201:
49 case 0x0300:
50 return version;
51 default:
52 break;
53 }
54
55 MXM_DBG(dev, "unknown version %d.%d\n", mxms[4], mxms[5]);
56 return 0x0000;
57}
58
59static u16
60mxms_headerlen(struct drm_device *dev)
61{
62 return 8;
63}
64
65static u16
66mxms_structlen(struct drm_device *dev)
67{
68 return *(u16 *)&mxms_data(dev)[6];
69}
70
71static bool
72mxms_checksum(struct drm_device *dev)
73{
74 u16 size = mxms_headerlen(dev) + mxms_structlen(dev);
75 u8 *mxms = mxms_data(dev), sum = 0;
76 while (size--)
77 sum += *mxms++;
78 if (sum) {
79 MXM_DBG(dev, "checksum invalid\n");
80 return false;
81 }
82 return true;
83}
84
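mxms_checksum() accepts the shadowed structure only when the unsigned 8-bit
sum of every byte (header plus body) is zero; the vbios stores a byte chosen
so the total wraps to 0x00. The same property, demonstrated stand-alone:

    #include <stdint.h>
    #include <stdio.h>

    /* same loop as mxms_checksum() above, minus the driver plumbing */
    static int mxms_sum_ok(const uint8_t *mxms, uint16_t size)
    {
            uint8_t sum = 0;

            while (size--)
                    sum += *mxms++;
            return sum == 0;
    }

    int main(void)
    {
            uint8_t blob[6] = { 0x10, 0x20, 0x30, 0x40, 0x50, 0x00 };

            blob[5] = (uint8_t)(0x100 - 0xf0);  /* make the sum wrap to 0 */
            printf("%s\n", mxms_sum_ok(blob, 6) ? "ok" : "bad"); /* ok */
            return 0;
    }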
85static bool
86mxms_valid(struct drm_device *dev)
87{
88 u8 *mxms = mxms_data(dev);
89 if (*(u32 *)mxms != 0x5f4d584d) {
90 MXM_DBG(dev, "signature invalid\n");
91 return false;
92 }
93
94 if (!mxms_version(dev) || !mxms_checksum(dev))
95 return false;
96
97 return true;
98}
99
100static bool
101mxms_foreach(struct drm_device *dev, u8 types,
102 bool (*exec)(struct drm_device *, u8 *, void *), void *info)
103{
104 u8 *mxms = mxms_data(dev);
105 u8 *desc = mxms + mxms_headerlen(dev);
106 u8 *fini = desc + mxms_structlen(dev) - 1;
107 while (desc < fini) {
108 u8 type = desc[0] & 0x0f;
109 u8 headerlen = 0;
110 u8 recordlen = 0;
111 u8 entries = 0;
112
113 switch (type) {
114 case 0: /* Output Device Structure */
115 if (mxms_version(dev) >= 0x0300)
116 headerlen = 8;
117 else
118 headerlen = 6;
119 break;
120 case 1: /* System Cooling Capability Structure */
121 case 2: /* Thermal Structure */
122 case 3: /* Input Power Structure */
123 headerlen = 4;
124 break;
125 case 4: /* GPIO Device Structure */
126 headerlen = 4;
127 recordlen = 2;
128 entries = (ROM32(desc[0]) & 0x01f00000) >> 20;
129 break;
130 case 5: /* Vendor Specific Structure */
131 headerlen = 8;
132 break;
133 case 6: /* Backlight Control Structure */
134 if (mxms_version(dev) >= 0x0300) {
135 headerlen = 4;
136 recordlen = 8;
137 entries = (desc[1] & 0xf0) >> 4;
138 } else {
139 headerlen = 8;
140 }
141 break;
142 case 7: /* Fan Control Structure */
143 headerlen = 8;
144 recordlen = 4;
145 entries = desc[1] & 0x07;
146 break;
147 default:
148 MXM_DBG(dev, "unknown descriptor type %d\n", type);
149 return false;
150 }
151
152 if ((drm_debug & DRM_UT_DRIVER) && (exec == NULL)) {
153 static const char *mxms_desc_name[] = {
154 "ODS", "SCCS", "TS", "IPS",
155 "GSD", "VSS", "BCS", "FCS",
156 };
157 u8 *dump = desc;
158 int i, j;
159
160 MXM_DBG(dev, "%4s: ", mxms_desc_name[type]);
161 for (j = headerlen - 1; j >= 0; j--)
162 printk("%02x", dump[j]);
163 printk("\n");
164 dump += headerlen;
165
166 for (i = 0; i < entries; i++, dump += recordlen) {
167 MXM_DBG(dev, " ");
168 for (j = recordlen - 1; j >= 0; j--)
169 printk("%02x", dump[j]);
170 printk("\n");
171 }
172 }
173
174 if (types & (1 << type)) {
175 if (!exec(dev, desc, info))
176 return false;
177 }
178
179 desc += headerlen + (entries * recordlen);
180 }
181
182 return true;
183}
184
185static u8 *
186mxm_table(struct drm_device *dev, u8 *size)
187{
188 struct bit_entry x;
189
190 if (bit_table(dev, 'x', &x)) {
191 MXM_DBG(dev, "BIT 'x' table not present\n");
192 return NULL;
193 }
194
195 if (x.version != 1 || x.length < 3) {
196 MXM_MSG(dev, "BIT x table %d/%d unknown\n",
197 x.version, x.length);
198 return NULL;
199 }
200
201 *size = x.length;
202 return x.data;
203}
204
205/* These map MXM v2.x digital connection values to the appropriate SOR/link,
206 * hopefully they're correct for all boards within the same chipset...
207 *
208 * MXM v3.x VBIOS are nicer and provide pointers to these tables.
209 */
210static u8 nv84_sor_map[16] = {
211 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
212 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
213};
214
215static u8 nv92_sor_map[16] = {
216 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
217 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
218};
219
220static u8 nv94_sor_map[16] = {
221 0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
222 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
223};
224
225static u8 nv96_sor_map[16] = {
226 0x00, 0x14, 0x24, 0x00, 0x34, 0x00, 0x11, 0x31,
227 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
228};
229
230static u8 nv98_sor_map[16] = {
231 0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
232 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
233};
234
235static u8
236mxm_sor_map(struct drm_device *dev, u8 conn)
237{
238 struct drm_nouveau_private *dev_priv = dev->dev_private;
239 u8 len, *mxm = mxm_table(dev, &len);
240 if (mxm && len >= 6) {
241 u8 *map = ROMPTR(dev, mxm[4]);
242 if (map) {
243 if (map[0] == 0x10) {
244 if (conn < map[3])
245 return map[map[1] + conn];
246 return 0x00;
247 }
248
249 MXM_MSG(dev, "unknown sor map 0x%02x\n", map[0]);
250 }
251 }
252
253 if (dev_priv->chipset == 0x84 || dev_priv->chipset == 0x86)
254 return nv84_sor_map[conn];
255 if (dev_priv->chipset == 0x92)
256 return nv92_sor_map[conn];
257 if (dev_priv->chipset == 0x94)
258 return nv94_sor_map[conn];
259 if (dev_priv->chipset == 0x96)
260 return nv96_sor_map[conn];
261 if (dev_priv->chipset == 0x98)
262 return nv98_sor_map[conn];
263
264 MXM_MSG(dev, "missing sor map\n");
265 return 0x00;
266}
267
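Each byte in these maps packs two things, and mxm_match_dcb() further down
splits them with exactly these masks: the low nibble is the SOR index and
bits 5:4 are the DCB sorconf.link mask. A quick stand-alone unpack:

    #include <stdio.h>

    int main(void)
    {
            unsigned char v = 0x31;         /* nv84_sor_map[5] above */

            /* low nibble = SOR index, bits 5:4 = link mask */
            printf("sor %d, link mask %d\n", v & 0x0f, (v & 0x30) >> 4);
            /* prints: sor 1, link mask 3 */
            return 0;
    }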
268static u8
269mxm_ddc_map(struct drm_device *dev, u8 port)
270{
271 u8 len, *mxm = mxm_table(dev, &len);
272 if (mxm && len >= 8) {
273 u8 *map = ROMPTR(dev, mxm[6]);
274 if (map) {
275 if (map[0] == 0x10) {
276 if (port < map[3])
277 return map[map[1] + port];
278 return 0x00;
279 }
280
281 MXM_MSG(dev, "unknown ddc map 0x%02x\n", map[0]);
282 }
283 }
284
285 /* v2.x: directly write port as dcb i2cidx */
286 return (port << 4) | port;
287}
288
289struct mxms_odev {
290 u8 outp_type;
291 u8 conn_type;
292 u8 ddc_port;
293 u8 dig_conn;
294};
295
296static void
297mxms_output_device(struct drm_device *dev, u8 *pdata, struct mxms_odev *desc)
298{
299 u64 data = ROM32(pdata[0]);
300 if (mxms_version(dev) >= 0x0300)
301 data |= (u64)ROM16(pdata[4]) << 32;
302
303 desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
304 desc->ddc_port = (data & 0x0000000000000f00ULL) >> 8;
305 desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
306 desc->dig_conn = (data & 0x0000000000780000ULL) >> 19;
307}
308
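mxms_output_device() above is pure bitfield surgery on the packed ODS
descriptor (extended to 48 bits on MXM 3.x). Decoding a made-up descriptor
with the same masks shows how the fields line up; the sample value is
hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t data = 0x00083120;     /* hypothetical ODS descriptor */

            printf("outp_type %u\n", (unsigned)((data & 0xf0) >> 4));      /* 2 = TMDS */
            printf("ddc_port  %u\n", (unsigned)((data & 0xf00) >> 8));     /* 1 */
            printf("conn_type %u\n", (unsigned)((data & 0x1f000) >> 12));  /* 3 = DVI-D */
            printf("dig_conn  %u\n", (unsigned)((data & 0x780000) >> 19)); /* 1 */
            return 0;
    }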
309struct context {
310 u32 *outp;
311 struct mxms_odev desc;
312};
313
314static bool
315mxm_match_tmds_partner(struct drm_device *dev, u8 *data, void *info)
316{
317 struct context *ctx = info;
318 struct mxms_odev desc;
319
320 mxms_output_device(dev, data, &desc);
321 if (desc.outp_type == 2 &&
322 desc.dig_conn == ctx->desc.dig_conn)
323 return false;
324 return true;
325}
326
327static bool
328mxm_match_dcb(struct drm_device *dev, u8 *data, void *info)
329{
330 struct context *ctx = info;
331 u64 desc = *(u64 *)data;
332
333 mxms_output_device(dev, data, &ctx->desc);
334
335 /* match dcb encoder type to mxm-ods device type */
336 if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
337 return true;
338
339 /* digital output, have some extra stuff to match here, there's a
340 * table in the vbios that provides a mapping from the mxm digital
341 * connection enum values to SOR/link
342 */
343 if ((desc & 0x00000000000000f0) >= 0x20) {
344 /* check against sor index */
345 u8 link = mxm_sor_map(dev, ctx->desc.dig_conn);
346 if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
347 return true;
348
349 /* check dcb entry has a compatible link field */
350 link = (link & 0x30) >> 4;
351 if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
352 return true;
353 }
354
355 /* mark this descriptor accounted for by setting invalid device type,
356 * except of course some manufacturers don't follow specs properly and
357 * we need to avoid killing off the TMDS function on DP connectors
358 * if MXM-SIS is missing an entry for it.
359 */
360 data[0] &= ~0xf0;
361 if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
362 mxms_foreach(dev, 0x01, mxm_match_tmds_partner, ctx)) {
363 data[0] |= 0x20; /* modify descriptor to match TMDS now */
364 } else {
365 data[0] |= 0xf0;
366 }
367
368 return false;
369}
370
371static int
372mxm_dcb_sanitise_entry(struct drm_device *dev, void *data, int idx, u8 *dcbe)
373{
374 struct context ctx = { .outp = (u32 *)dcbe };
375 u8 type, i2cidx, link;
376 u8 *conn;
377
378 /* look for an output device structure that matches this dcb entry.
379 * if one isn't found, disable it.
380 */
381 if (mxms_foreach(dev, 0x01, mxm_match_dcb, &ctx)) {
382 MXM_DBG(dev, "disable %d: 0x%08x 0x%08x\n",
383 idx, ctx.outp[0], ctx.outp[1]);
384 ctx.outp[0] |= 0x0000000f;
385 return 0;
386 }
387
388 /* modify the output's ddc/aux port, there's a pointer to a table
389 * with the mapping from mxm ddc/aux port to dcb i2c_index in the
390 * vbios mxm table
391 */
392 i2cidx = mxm_ddc_map(dev, ctx.desc.ddc_port);
393 if ((ctx.outp[0] & 0x0000000f) != OUTPUT_DP)
394 i2cidx = (i2cidx & 0x0f) << 4;
395 else
396 i2cidx = (i2cidx & 0xf0);
397
398 if (i2cidx != 0xf0) {
399 ctx.outp[0] &= ~0x000000f0;
400 ctx.outp[0] |= i2cidx;
401 }
402
403 /* override dcb sorconf.link, based on what mxm data says */
404 switch (ctx.desc.outp_type) {
405 case 0x00: /* Analog CRT */
406 case 0x01: /* Analog TV/HDTV */
407 break;
408 default:
409 link = mxm_sor_map(dev, ctx.desc.dig_conn) & 0x30;
410 ctx.outp[1] &= ~0x00000030;
411 ctx.outp[1] |= link;
412 break;
413 }
414
415 /* we may need to fixup various other vbios tables based on what
416 * the descriptor says the connector type should be.
417 *
418 * in a lot of cases, the vbios tables will claim DVI-I is possible,
419 * and the mxm data says the connector is really HDMI. another
420 * common example is DP->eDP.
421 */
422 conn = dcb_conn(dev, (ctx.outp[0] & 0x0000f000) >> 12);
423 type = conn[0];
424 switch (ctx.desc.conn_type) {
425 case 0x01: /* LVDS */
426 ctx.outp[1] |= 0x00000004; /* use_power_scripts */
427 /* XXX: modify default link width in LVDS table */
428 break;
429 case 0x02: /* HDMI */
430 type = DCB_CONNECTOR_HDMI_1;
431 break;
432 case 0x03: /* DVI-D */
433 type = DCB_CONNECTOR_DVI_D;
434 break;
435 case 0x0e: /* eDP, falls through to DPint */
436 ctx.outp[1] |= 0x00010000;
437 case 0x07: /* DP internal, wtf is this?? HP8670w */
438 ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
439 type = DCB_CONNECTOR_eDP;
440 break;
441 default:
442 break;
443 }
444
445 if (mxms_version(dev) >= 0x0300)
446 conn[0] = type;
447
448 return 0;
449}
450
451static bool
452mxm_show_unmatched(struct drm_device *dev, u8 *data, void *info)
453{
454 u64 desc = *(u64 *)data;
455 if ((desc & 0xf0) != 0xf0)
456 MXM_MSG(dev, "unmatched output device 0x%016llx\n", desc);
457 return true;
458}
459
460static void
461mxm_dcb_sanitise(struct drm_device *dev)
462{
463 u8 *dcb = dcb_table(dev);
464 if (!dcb || dcb[0] != 0x40) {
465 MXM_DBG(dev, "unsupported DCB version\n");
466 return;
467 }
468
469 dcb_outp_foreach(dev, NULL, mxm_dcb_sanitise_entry);
470 mxms_foreach(dev, 0x01, mxm_show_unmatched, NULL);
471}
472
473static bool
474mxm_shadow_rom_fetch(struct nouveau_i2c_chan *i2c, u8 addr,
475 u8 offset, u8 size, u8 *data)
476{
477 struct i2c_msg msgs[] = {
478 { .addr = addr, .flags = 0, .len = 1, .buf = &offset },
479 { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
480 };
481
482 return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
483}
484
485static bool
486mxm_shadow_rom(struct drm_device *dev, u8 version)
487{
488 struct drm_nouveau_private *dev_priv = dev->dev_private;
489 struct nouveau_i2c_chan *i2c = NULL;
490 u8 i2cidx, mxms[6], addr, size;
491
492 i2cidx = mxm_ddc_map(dev, 1 /* LVDS_DDC */) & 0x0f;
493 if (i2cidx < 0x0f)
494 i2c = nouveau_i2c_find(dev, i2cidx);
495 if (!i2c)
496 return false;
497
498 addr = 0x54;
499 if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms)) {
500 addr = 0x56;
501 if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms))
502 return false;
503 }
504
505 dev_priv->mxms = mxms; /* temporary: lets the size helpers parse the probed header */
506 size = mxms_headerlen(dev) + mxms_structlen(dev);
507 dev_priv->mxms = kmalloc(size, GFP_KERNEL);
508
509 if (dev_priv->mxms &&
510 mxm_shadow_rom_fetch(i2c, addr, 0, size, dev_priv->mxms))
511 return true;
512
513 kfree(dev_priv->mxms);
514 dev_priv->mxms = NULL;
515 return false;
516}
517
518#if defined(CONFIG_ACPI)
519static bool
520mxm_shadow_dsm(struct drm_device *dev, u8 version)
521{
522 struct drm_nouveau_private *dev_priv = dev->dev_private;
523 static char muid[] = {
524 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
525 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
526 };
527 u32 mxms_args[] = { 0x00000000 };
528 union acpi_object args[4] = {
529 /* _DSM MUID */
530 { .buffer.type = 3,
531 .buffer.length = sizeof(muid),
532 .buffer.pointer = muid,
533 },
534 /* spec says this can be zero to mean "highest revision", but
535 * of course there's at least one bios out there which fails
536 * unless you pass in exactly the version it supports..
537 */
538 { .integer.type = ACPI_TYPE_INTEGER,
539 .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
540 },
541 /* MXMS function */
542 { .integer.type = ACPI_TYPE_INTEGER,
543 .integer.value = 0x00000010,
544 },
545 /* Pointer to MXMS arguments */
546 { .buffer.type = ACPI_TYPE_BUFFER,
547 .buffer.length = sizeof(mxms_args),
548 .buffer.pointer = (char *)mxms_args,
549 },
550 };
551 struct acpi_object_list list = { ARRAY_SIZE(args), args };
552 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
553 union acpi_object *obj;
554 acpi_handle handle;
555 int ret;
556
557 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
558 if (!handle)
559 return false;
560
561 ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
562 if (ret) {
563 MXM_DBG(dev, "DSM MXMS failed: %d\n", ret);
564 return false;
565 }
566
567 obj = retn.pointer;
568 if (obj->type == ACPI_TYPE_BUFFER) {
569 dev_priv->mxms = kmemdup(obj->buffer.pointer,
570 obj->buffer.length, GFP_KERNEL);
571 } else
572 if (obj->type == ACPI_TYPE_INTEGER) {
573 MXM_DBG(dev, "DSM MXMS returned 0x%llx\n", obj->integer.value);
574 }
575
576 kfree(obj);
577 return dev_priv->mxms != NULL;
578}
579#endif
580
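The odd-looking revision argument in mxm_shadow_dsm() spreads the packed
one-byte MXM version across two nibble positions, turning the vbios byte
0x30 into the 0x300-style revision value that at least some firmware insists
on matching exactly. Stand-alone:

    #include <stdio.h>

    int main(void)
    {
            unsigned char version = 0x30;   /* MXM 3.0 as stored in the vbios */

            /* same expression as the _DSM revision argument above */
            printf("%#x\n", (version & 0xf0) << 4 | (version & 0x0f)); /* 0x300 */
            return 0;
    }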
581#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
582
583#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
584
585static u8
586wmi_wmmx_mxmi(struct drm_device *dev, u8 version)
587{
588 u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
589 struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
590 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
591 union acpi_object *obj;
592 acpi_status status;
593
594 status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
595 if (ACPI_FAILURE(status)) {
596 MXM_DBG(dev, "WMMX MXMI returned %d\n", status);
597 return 0x00;
598 }
599
600 obj = retn.pointer;
601 if (obj->type == ACPI_TYPE_INTEGER) {
602 version = obj->integer.value;
603 MXM_DBG(dev, "WMMX MXMI version %d.%d\n",
604 (version >> 4), version & 0x0f);
605 } else {
606 version = 0;
607 MXM_DBG(dev, "WMMX MXMI returned non-integer\n");
608 }
609
610 kfree(obj);
611 return version;
612}
613
614static bool
615mxm_shadow_wmi(struct drm_device *dev, u8 version)
616{
617 struct drm_nouveau_private *dev_priv = dev->dev_private;
618 u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
619 struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
620 struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
621 union acpi_object *obj;
622 acpi_status status;
623
624 if (!wmi_has_guid(WMI_WMMX_GUID)) {
625 MXM_DBG(dev, "WMMX GUID not found\n");
626 return false;
627 }
628
629 mxms_args[1] = wmi_wmmx_mxmi(dev, 0x00);
630 if (!mxms_args[1])
631 mxms_args[1] = wmi_wmmx_mxmi(dev, version);
632 if (!mxms_args[1])
633 return false;
634
635 status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
636 if (ACPI_FAILURE(status)) {
637 MXM_DBG(dev, "WMMX MXMS returned %d\n", status);
638 return false;
639 }
640
641 obj = retn.pointer;
642 if (obj->type == ACPI_TYPE_BUFFER) {
643 dev_priv->mxms = kmemdup(obj->buffer.pointer,
644 obj->buffer.length, GFP_KERNEL);
645 }
646
647 kfree(obj);
648 return dev_priv->mxms != NULL;
649}
650#endif
651
652struct mxm_shadow_h {
653 const char *name;
654 bool (*exec)(struct drm_device *, u8 version);
655} _mxm_shadow[] = {
656 { "ROM", mxm_shadow_rom },
657#if defined(CONFIG_ACPI)
658 { "DSM", mxm_shadow_dsm },
659#endif
660#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
661 { "WMI", mxm_shadow_wmi },
662#endif
663 {}
664};
665
666static int
667mxm_shadow(struct drm_device *dev, u8 version)
668{
669 struct drm_nouveau_private *dev_priv = dev->dev_private;
670 struct mxm_shadow_h *shadow = _mxm_shadow;
671 do {
672 MXM_DBG(dev, "checking %s\n", shadow->name);
673 if (shadow->exec(dev, version)) {
674 if (mxms_valid(dev))
675 return 0;
676 kfree(dev_priv->mxms);
677 dev_priv->mxms = NULL;
678 }
679 } while ((++shadow)->name);
680 return -ENOENT;
681}
682
683int
684nouveau_mxm_init(struct drm_device *dev)
685{
686 u8 mxm_size, *mxm = mxm_table(dev, &mxm_size);
687 if (!mxm || !mxm[0]) {
688 MXM_MSG(dev, "no VBIOS data, nothing to do\n");
689 return 0;
690 }
691
692 MXM_MSG(dev, "BIOS version %d.%d\n", mxm[0] >> 4, mxm[0] & 0x0f);
693
694 if (mxm_shadow(dev, mxm[0])) {
695 MXM_MSG(dev, "failed to locate valid SIS\n");
696#if 0
697 /* we should, perhaps, fall back to some kind of limited
698 * mode here if the x86 vbios hasn't already done the
699 * work for us (so we prevent loading with completely
700 * whacked vbios tables).
701 */
702 return -EINVAL;
703#else
704 return 0;
705#endif
706 }
707
708 MXM_MSG(dev, "MXMS Version %d.%d\n",
709 mxms_version(dev) >> 8, mxms_version(dev) & 0xff);
710 mxms_foreach(dev, 0, NULL, NULL);
711
712 if (nouveau_mxmdcb)
713 mxm_dcb_sanitise(dev);
714 return 0;
715}
716
717void
718nouveau_mxm_fini(struct drm_device *dev)
719{
720 struct drm_nouveau_private *dev_priv = dev->dev_private;
721 kfree(dev_priv->mxms);
722 dev_priv->mxms = NULL;
723}
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
deleted file mode 100644
index 1ad3e6c8c432..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ /dev/null
@@ -1,162 +0,0 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include <drm/drmP.h>
29#include "nouveau_drv.h"
30#include "nouveau_ramht.h"
31
32int
33nouveau_notifier_init_channel(struct nouveau_channel *chan)
34{
35 struct drm_device *dev = chan->dev;
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_bo *ntfy = NULL;
38 uint32_t flags, ttmpl;
39 int ret;
40
41 if (nouveau_vram_notify) {
42 flags = NOUVEAU_GEM_DOMAIN_VRAM;
43 ttmpl = TTM_PL_FLAG_VRAM;
44 } else {
45 flags = NOUVEAU_GEM_DOMAIN_GART;
46 ttmpl = TTM_PL_FLAG_TT;
47 }
48
49 ret = nouveau_gem_new(dev, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
50 if (ret)
51 return ret;
52
53 ret = nouveau_bo_pin(ntfy, ttmpl);
54 if (ret)
55 goto out_err;
56
57 ret = nouveau_bo_map(ntfy);
58 if (ret)
59 goto out_err;
60
61 if (dev_priv->card_type >= NV_50) {
62 ret = nouveau_bo_vma_add(ntfy, chan->vm, &chan->notifier_vma);
63 if (ret)
64 goto out_err;
65 }
66
67 ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
68 if (ret)
69 goto out_err;
70
71 chan->notifier_bo = ntfy;
72out_err:
73 if (ret) {
74 nouveau_bo_vma_del(ntfy, &chan->notifier_vma);
75 drm_gem_object_unreference_unlocked(ntfy->gem);
76 }
77
78 return ret;
79}
80
81void
82nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
83{
84 struct drm_device *dev = chan->dev;
85
86 if (!chan->notifier_bo)
87 return;
88
89 nouveau_bo_vma_del(chan->notifier_bo, &chan->notifier_vma);
90 nouveau_bo_unmap(chan->notifier_bo);
91 mutex_lock(&dev->struct_mutex);
92 nouveau_bo_unpin(chan->notifier_bo);
93 mutex_unlock(&dev->struct_mutex);
94 drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
95 drm_mm_takedown(&chan->notifier_heap);
96}
97
98static void
99nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
100 struct nouveau_gpuobj *gpuobj)
101{
102 NV_DEBUG(dev, "\n");
103
104 if (gpuobj->priv)
105 drm_mm_put_block(gpuobj->priv);
106}
107
108int
109nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
110 int size, uint32_t start, uint32_t end,
111 uint32_t *b_offset)
112{
113 struct drm_device *dev = chan->dev;
114 struct drm_nouveau_private *dev_priv = dev->dev_private;
115 struct nouveau_gpuobj *nobj = NULL;
116 struct drm_mm_node *mem;
117 uint64_t offset;
118 int target, ret;
119
120 mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
121 start, end, 0);
122 if (mem)
123 mem = drm_mm_get_block_range(mem, size, 0, start, end);
124 if (!mem) {
125 NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
126 return -ENOMEM;
127 }
128
129 if (dev_priv->card_type < NV_50) {
130 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
131 target = NV_MEM_TARGET_VRAM;
132 else
133 target = NV_MEM_TARGET_GART;
134 offset = chan->notifier_bo->bo.offset;
135 } else {
136 target = NV_MEM_TARGET_VM;
137 offset = chan->notifier_vma.offset;
138 }
139 offset += mem->start;
140
141 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
142 mem->size, NV_MEM_ACCESS_RW, target,
143 &nobj);
144 if (ret) {
145 drm_mm_put_block(mem);
146 NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
147 return ret;
148 }
149 nobj->dtor = nouveau_notifier_gpuobj_dtor;
150 nobj->priv = mem;
151
152 ret = nouveau_ramht_insert(chan, handle, nobj);
153 nouveau_gpuobj_ref(NULL, &nobj);
154 if (ret) {
155 drm_mm_put_block(mem);
156 NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
157 return ret;
158 }
159
160 *b_offset = mem->start;
161 return 0;
162}
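
The allocator deleted above pairs the legacy two-step drm_mm interface (search for a hole inside [start, end), then claim it) with a destructor hook: the claimed node is stashed in gpuobj->priv so nouveau_notifier_gpuobj_dtor() can return it when the object dies. A condensed sketch of the allocation half, assuming the same pre-3.7 drm_mm API:

static int
example_suballoc(struct drm_mm *heap, unsigned long size,
		 unsigned long start, unsigned long end,
		 struct drm_mm_node **pnode)
{
	struct drm_mm_node *node;

	/* step 1: locate a hole large enough inside the range */
	node = drm_mm_search_free_in_range(heap, size, 0, start, end, 0);
	/* step 2: carve the block out of that hole */
	if (node)
		node = drm_mm_get_block_range(node, size, 0, start, end);
	if (!node)
		return -ENOMEM;	/* range exhausted */

	*pnode = node;	/* release later with drm_mm_put_block() */
	return 0;
}
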
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 4946d308a362..4fe883c54918 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -24,14 +24,15 @@
24 24
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26 26
27#include "nouveau_drv.h" 27#include "nouveau_drm.h"
28#include "nouveau_reg.h"
28#include "nouveau_pm.h" 29#include "nouveau_pm.h"
29 30
30static u8 * 31static u8 *
31nouveau_perf_table(struct drm_device *dev, u8 *ver) 32nouveau_perf_table(struct drm_device *dev, u8 *ver)
32{ 33{
33 struct drm_nouveau_private *dev_priv = dev->dev_private; 34 struct nouveau_drm *drm = nouveau_drm(dev);
34 struct nvbios *bios = &dev_priv->vbios; 35 struct nvbios *bios = &drm->vbios;
35 struct bit_entry P; 36 struct bit_entry P;
36 37
37 if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) { 38 if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) {
@@ -87,7 +88,7 @@ u8 *
87nouveau_perf_rammap(struct drm_device *dev, u32 freq, 88nouveau_perf_rammap(struct drm_device *dev, u32 freq,
88 u8 *ver, u8 *hdr, u8 *cnt, u8 *len) 89 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
89{ 90{
90 struct drm_nouveau_private *dev_priv = dev->dev_private; 91 struct nouveau_drm *drm = nouveau_drm(dev);
91 struct bit_entry P; 92 struct bit_entry P;
92 u8 *perf, i = 0; 93 u8 *perf, i = 0;
93 94
@@ -114,8 +115,8 @@ nouveau_perf_rammap(struct drm_device *dev, u32 freq,
114 return NULL; 115 return NULL;
115 } 116 }
116 117
117 if (dev_priv->chipset == 0x49 || 118 if (nv_device(drm->device)->chipset == 0x49 ||
118 dev_priv->chipset == 0x4b) 119 nv_device(drm->device)->chipset == 0x4b)
119 freq /= 2; 120 freq /= 2;
120 121
121 while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) { 122 while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) {
@@ -142,12 +143,13 @@ nouveau_perf_rammap(struct drm_device *dev, u32 freq,
142u8 * 143u8 *
143nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len) 144nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
144{ 145{
145 struct drm_nouveau_private *dev_priv = dev->dev_private; 146 struct nouveau_device *device = nouveau_dev(dev);
146 struct nvbios *bios = &dev_priv->vbios; 147 struct nouveau_drm *drm = nouveau_drm(dev);
148 struct nvbios *bios = &drm->vbios;
147 u8 strap, hdr, cnt; 149 u8 strap, hdr, cnt;
148 u8 *rammap; 150 u8 *rammap;
149 151
150 strap = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2; 152 strap = (nv_rd32(device, 0x101000) & 0x0000003c) >> 2;
151 if (bios->ram_restrict_tbl_ptr) 153 if (bios->ram_restrict_tbl_ptr)
152 strap = bios->data[bios->ram_restrict_tbl_ptr + strap]; 154 strap = bios->data[bios->ram_restrict_tbl_ptr + strap];
153 155
@@ -161,8 +163,8 @@ nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
161u8 * 163u8 *
162nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len) 164nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
163{ 165{
164 struct drm_nouveau_private *dev_priv = dev->dev_private; 166 struct nouveau_drm *drm = nouveau_drm(dev);
165 struct nvbios *bios = &dev_priv->vbios; 167 struct nvbios *bios = &drm->vbios;
166 struct bit_entry P; 168 struct bit_entry P;
167 u8 *perf, *timing = NULL; 169 u8 *perf, *timing = NULL;
168 u8 i = 0, hdr, cnt; 170 u8 i = 0, hdr, cnt;
@@ -202,20 +204,21 @@ nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
202static void 204static void
203legacy_perf_init(struct drm_device *dev) 205legacy_perf_init(struct drm_device *dev)
204{ 206{
205 struct drm_nouveau_private *dev_priv = dev->dev_private; 207 struct nouveau_device *device = nouveau_dev(dev);
206 struct nvbios *bios = &dev_priv->vbios; 208 struct nouveau_drm *drm = nouveau_drm(dev);
207 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 209 struct nvbios *bios = &drm->vbios;
210 struct nouveau_pm *pm = nouveau_pm(dev);
208 char *perf, *entry, *bmp = &bios->data[bios->offset]; 211 char *perf, *entry, *bmp = &bios->data[bios->offset];
209 int headerlen, use_straps; 212 int headerlen, use_straps;
210 213
211 if (bmp[5] < 0x5 || bmp[6] < 0x14) { 214 if (bmp[5] < 0x5 || bmp[6] < 0x14) {
212 NV_DEBUG(dev, "BMP version too old for perf\n"); 215 NV_DEBUG(drm, "BMP version too old for perf\n");
213 return; 216 return;
214 } 217 }
215 218
216 perf = ROMPTR(dev, bmp[0x73]); 219 perf = ROMPTR(dev, bmp[0x73]);
217 if (!perf) { 220 if (!perf) {
218 NV_DEBUG(dev, "No memclock table pointer found.\n"); 221 NV_DEBUG(drm, "No memclock table pointer found.\n");
219 return; 222 return;
220 } 223 }
221 224
@@ -231,13 +234,13 @@ legacy_perf_init(struct drm_device *dev)
231 headerlen = (use_straps ? 8 : 2); 234 headerlen = (use_straps ? 8 : 2);
232 break; 235 break;
233 default: 236 default:
234 NV_WARN(dev, "Unknown memclock table version %x.\n", perf[0]); 237 NV_WARN(drm, "Unknown memclock table version %x.\n", perf[0]);
235 return; 238 return;
236 } 239 }
237 240
238 entry = perf + headerlen; 241 entry = perf + headerlen;
239 if (use_straps) 242 if (use_straps)
240 entry += (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1; 243 entry += (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
241 244
242 sprintf(pm->perflvl[0].name, "performance_level_0"); 245 sprintf(pm->perflvl[0].name, "performance_level_0");
243 pm->perflvl[0].memory = ROM16(entry[0]) * 20; 246 pm->perflvl[0].memory = ROM16(entry[0]) * 20;
@@ -247,7 +250,7 @@ legacy_perf_init(struct drm_device *dev)
247static void 250static void
248nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl) 251nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
249{ 252{
250 struct drm_nouveau_private *dev_priv = dev->dev_private; 253 struct nouveau_drm *drm = nouveau_drm(dev);
251 struct bit_entry P; 254 struct bit_entry P;
252 u8 *vmap; 255 u8 *vmap;
253 int id; 256 int id;
@@ -258,7 +261,7 @@ nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
258 /* boards using voltage table version <0x40 store the voltage 261 /* boards using voltage table version <0x40 store the voltage
259 * level directly in the perflvl entry as a multiple of 10mV 262 * level directly in the perflvl entry as a multiple of 10mV
260 */ 263 */
261 if (dev_priv->engine.pm.voltage.version < 0x40) { 264 if (drm->pm->voltage.version < 0x40) {
262 perflvl->volt_min = id * 10000; 265 perflvl->volt_min = id * 10000;
263 perflvl->volt_max = perflvl->volt_min; 266 perflvl->volt_max = perflvl->volt_min;
264 return; 267 return;
@@ -268,14 +271,14 @@ nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
268 * vbios table containing a min/max voltage value for the perflvl 271 * vbios table containing a min/max voltage value for the perflvl
269 */ 272 */
270 if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) { 273 if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) {
271 NV_DEBUG(dev, "where's our volt map table ptr? %d %d\n", 274 NV_DEBUG(drm, "where's our volt map table ptr? %d %d\n",
272 P.version, P.length); 275 P.version, P.length);
273 return; 276 return;
274 } 277 }
275 278
276 vmap = ROMPTR(dev, P.data[32]); 279 vmap = ROMPTR(dev, P.data[32]);
277 if (!vmap) { 280 if (!vmap) {
278 NV_DEBUG(dev, "volt map table pointer invalid\n"); 281 NV_DEBUG(drm, "volt map table pointer invalid\n");
279 return; 282 return;
280 } 283 }
281 284
@@ -289,9 +292,9 @@ nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
289void 292void
290nouveau_perf_init(struct drm_device *dev) 293nouveau_perf_init(struct drm_device *dev)
291{ 294{
292 struct drm_nouveau_private *dev_priv = dev->dev_private; 295 struct nouveau_drm *drm = nouveau_drm(dev);
293 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 296 struct nouveau_pm *pm = nouveau_pm(dev);
294 struct nvbios *bios = &dev_priv->vbios; 297 struct nvbios *bios = &drm->vbios;
295 u8 *perf, ver, hdr, cnt, len; 298 u8 *perf, ver, hdr, cnt, len;
296 int ret, vid, i = -1; 299 int ret, vid, i = -1;
297 300
@@ -301,8 +304,6 @@ nouveau_perf_init(struct drm_device *dev)
301 } 304 }
302 305
303 perf = nouveau_perf_table(dev, &ver); 306 perf = nouveau_perf_table(dev, &ver);
304 if (ver >= 0x20 && ver < 0x40)
305 pm->fan.pwm_divisor = ROM16(perf[6]);
306 307
307 while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) { 308 while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) {
308 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; 309 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
@@ -328,8 +329,8 @@ nouveau_perf_init(struct drm_device *dev)
328 perflvl->shader = ROM16(perf[6]) * 1000; 329 perflvl->shader = ROM16(perf[6]) * 1000;
329 perflvl->core = perflvl->shader; 330 perflvl->core = perflvl->shader;
330 perflvl->core += (signed char)perf[8] * 1000; 331 perflvl->core += (signed char)perf[8] * 1000;
331 if (dev_priv->chipset == 0x49 || 332 if (nv_device(drm->device)->chipset == 0x49 ||
332 dev_priv->chipset == 0x4b) 333 nv_device(drm->device)->chipset == 0x4b)
333 perflvl->memory = ROM16(perf[11]) * 1000; 334 perflvl->memory = ROM16(perf[11]) * 1000;
334 else 335 else
335 perflvl->memory = ROM16(perf[11]) * 2000; 336 perflvl->memory = ROM16(perf[11]) * 2000;
@@ -356,7 +357,7 @@ nouveau_perf_init(struct drm_device *dev)
356#define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000) 357#define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000)
357 perflvl->fanspeed = 0; /*XXX*/ 358 perflvl->fanspeed = 0; /*XXX*/
358 perflvl->volt_min = perf[2]; 359 perflvl->volt_min = perf[2];
359 if (dev_priv->card_type == NV_50) { 360 if (nv_device(drm->device)->card_type == NV_50) {
360 perflvl->core = subent(0); 361 perflvl->core = subent(0);
361 perflvl->shader = subent(1); 362 perflvl->shader = subent(1);
362 perflvl->memory = subent(2); 363 perflvl->memory = subent(2);
@@ -382,7 +383,7 @@ nouveau_perf_init(struct drm_device *dev)
382 if (pm->voltage.supported && perflvl->volt_min) { 383 if (pm->voltage.supported && perflvl->volt_min) {
383 vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min); 384 vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
384 if (vid < 0) { 385 if (vid < 0) {
385 NV_DEBUG(dev, "perflvl %d, bad vid\n", i); 386 NV_DEBUG(drm, "perflvl %d, bad vid\n", i);
386 continue; 387 continue;
387 } 388 }
388 } 389 }
@@ -391,7 +392,7 @@ nouveau_perf_init(struct drm_device *dev)
391 ret = nouveau_mem_timing_calc(dev, perflvl->memory, 392 ret = nouveau_mem_timing_calc(dev, perflvl->memory,
392 &perflvl->timing); 393 &perflvl->timing);
393 if (ret) { 394 if (ret) {
394 NV_DEBUG(dev, "perflvl %d, bad timing: %d\n", i, ret); 395 NV_DEBUG(drm, "perflvl %d, bad timing: %d\n", i, ret);
395 continue; 396 continue;
396 } 397 }
397 398
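
Every hunk in this file performs the same mechanical substitution: the untyped dev->dev_private cast to drm_nouveau_private gives way to typed accessors such as nouveau_drm() and nouveau_dev(). Schematically, with a simplified struct layout for illustration (the real accessors live in nouveau_drm.h):

static inline struct nouveau_drm *
nouveau_drm(struct drm_device *dev)
{
	return dev->dev_private;	/* installed by the driver at load */
}

static struct nvbios *
example_vbios(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);	/* was: dev_priv */
	return &drm->vbios;				/* was: &dev_priv->vbios */
}
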
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 7cf95b20b7a4..0bf64c90aa20 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -22,12 +22,6 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <drm/drmP.h>
26
27#include "nouveau_drv.h"
28#include "nouveau_pm.h"
29#include "nouveau_gpio.h"
30
31#ifdef CONFIG_ACPI 25#ifdef CONFIG_ACPI
32#include <linux/acpi.h> 26#include <linux/acpi.h>
33#endif 27#endif
@@ -35,85 +29,41 @@
35#include <linux/hwmon.h> 29#include <linux/hwmon.h>
36#include <linux/hwmon-sysfs.h> 30#include <linux/hwmon-sysfs.h>
37 31
38static int 32#include <drm/drmP.h>
39nouveau_pwmfan_get(struct drm_device *dev)
40{
41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
43 struct gpio_func gpio;
44 u32 divs, duty;
45 int ret;
46 33
47 if (!pm->pwm_get) 34#include "nouveau_drm.h"
48 return -ENODEV; 35#include "nouveau_pm.h"
49 36
50 ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio); 37#include <subdev/gpio.h>
51 if (ret == 0) { 38#include <subdev/timer.h>
52 ret = pm->pwm_get(dev, gpio.line, &divs, &duty); 39#include <subdev/therm.h>
53 if (ret == 0 && divs) {
54 divs = max(divs, duty);
55 if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
56 duty = divs - duty;
57 return (duty * 100) / divs;
58 }
59 40
60 return nouveau_gpio_func_get(dev, gpio.func) * 100; 41MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
61 } 42static char *nouveau_perflvl;
43module_param_named(perflvl, nouveau_perflvl, charp, 0400);
62 44
63 return -ENODEV; 45MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
64} 46static int nouveau_perflvl_wr;
65 47module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
66static int
67nouveau_pwmfan_set(struct drm_device *dev, int percent)
68{
69 struct drm_nouveau_private *dev_priv = dev->dev_private;
70 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
71 struct gpio_func gpio;
72 u32 divs, duty;
73 int ret;
74
75 if (!pm->pwm_set)
76 return -ENODEV;
77
78 ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
79 if (ret == 0) {
80 divs = pm->fan.pwm_divisor;
81 if (pm->fan.pwm_freq) {
82 /*XXX: PNVIO clock more than likely... */
83 divs = 135000 / pm->fan.pwm_freq;
84 if (dev_priv->chipset < 0xa3)
85 divs /= 4;
86 }
87
88 duty = ((divs * percent) + 99) / 100;
89 if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
90 duty = divs - duty;
91
92 ret = pm->pwm_set(dev, gpio.line, divs, duty);
93 if (!ret)
94 pm->fan.percent = percent;
95 return ret;
96 }
97
98 return -ENODEV;
99}
100 48
101static int 49static int
102nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl, 50nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
103 struct nouveau_pm_level *a, struct nouveau_pm_level *b) 51 struct nouveau_pm_level *a, struct nouveau_pm_level *b)
104{ 52{
105 struct drm_nouveau_private *dev_priv = dev->dev_private; 53 struct nouveau_drm *drm = nouveau_drm(dev);
106 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 54 struct nouveau_pm *pm = nouveau_pm(dev);
55 struct nouveau_therm *therm = nouveau_therm(drm);
107 int ret; 56 int ret;
108 57
109 /*XXX: not on all boards, we should control based on temperature 58 /*XXX: not on all boards, we should control based on temperature
110 * on recent boards.. or maybe on some other factor we don't 59 * on recent boards.. or maybe on some other factor we don't
111 * know about? 60 * know about?
112 */ 61 */
113 if (a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) { 62 if (therm && therm->fan_set &&
114 ret = nouveau_pwmfan_set(dev, perflvl->fanspeed); 63 a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
64 ret = therm->fan_set(therm, perflvl->fanspeed);
115 if (ret && ret != -ENODEV) { 65 if (ret && ret != -ENODEV) {
116 NV_ERROR(dev, "fanspeed set failed: %d\n", ret); 66 NV_ERROR(drm, "fanspeed set failed: %d\n", ret);
117 return ret; 67 return ret;
118 } 68 }
119 } 69 }
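
The deleted nouveau_pwmfan_get/set helpers did two jobs the therm subdev now owns: convert between a 0-100 fan percentage and a divisor-relative PWM duty (rounding the duty up), and invert the duty on boards whose PWM line is active-low (card_type <= NV_40, or gpio.log[0] & 1). That arithmetic, lifted into standalone form with hypothetical names:

static u32
pwm_duty_from_percent(u32 divs, u32 percent, bool active_low)
{
	u32 duty = (divs * percent + 99) / 100;	/* round up */

	return active_low ? divs - duty : duty;
}

static u32
pwm_percent_from_duty(u32 divs, u32 duty, bool active_low)
{
	divs = max(divs, duty);	/* guard against duty > divs, as above */
	if (active_low)
		duty = divs - duty;

	return duty * 100 / divs;	/* truncates, as the old code did */
}
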
@@ -122,7 +72,7 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
122 if (perflvl->volt_min && b->volt_min > a->volt_min) { 72 if (perflvl->volt_min && b->volt_min > a->volt_min) {
123 ret = pm->voltage_set(dev, perflvl->volt_min); 73 ret = pm->voltage_set(dev, perflvl->volt_min);
124 if (ret) { 74 if (ret) {
125 NV_ERROR(dev, "voltage set failed: %d\n", ret); 75 NV_ERROR(drm, "voltage set failed: %d\n", ret);
126 return ret; 76 return ret;
127 } 77 }
128 } 78 }
@@ -134,8 +84,7 @@ nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
134static int 84static int
135nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl) 85nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
136{ 86{
137 struct drm_nouveau_private *dev_priv = dev->dev_private; 87 struct nouveau_pm *pm = nouveau_pm(dev);
138 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
139 void *state; 88 void *state;
140 int ret; 89 int ret;
141 90
@@ -171,8 +120,9 @@ error:
171void 120void
172nouveau_pm_trigger(struct drm_device *dev) 121nouveau_pm_trigger(struct drm_device *dev)
173{ 122{
174 struct drm_nouveau_private *dev_priv = dev->dev_private; 123 struct nouveau_drm *drm = nouveau_drm(dev);
175 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 124 struct nouveau_timer *ptimer = nouveau_timer(drm->device);
125 struct nouveau_pm *pm = nouveau_pm(dev);
176 struct nouveau_pm_profile *profile = NULL; 126 struct nouveau_pm_profile *profile = NULL;
177 struct nouveau_pm_level *perflvl = NULL; 127 struct nouveau_pm_level *perflvl = NULL;
178 int ret; 128 int ret;
@@ -194,24 +144,22 @@ nouveau_pm_trigger(struct drm_device *dev)
194 144
195 /* change perflvl, if necessary */ 145 /* change perflvl, if necessary */
196 if (perflvl != pm->cur) { 146 if (perflvl != pm->cur) {
197 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; 147 u64 time0 = ptimer->read(ptimer);
198 u64 time0 = ptimer->read(dev);
199 148
200 NV_INFO(dev, "setting performance level: %d", perflvl->id); 149 NV_INFO(drm, "setting performance level: %d", perflvl->id);
201 ret = nouveau_pm_perflvl_set(dev, perflvl); 150 ret = nouveau_pm_perflvl_set(dev, perflvl);
202 if (ret) 151 if (ret)
203 NV_INFO(dev, "> reclocking failed: %d\n\n", ret); 152 NV_INFO(drm, "> reclocking failed: %d\n\n", ret);
204 153
205 NV_INFO(dev, "> reclocking took %lluns\n\n", 154 NV_INFO(drm, "> reclocking took %lluns\n\n",
206 ptimer->read(dev) - time0); 155 ptimer->read(ptimer) - time0);
207 } 156 }
208} 157}
209 158
210static struct nouveau_pm_profile * 159static struct nouveau_pm_profile *
211profile_find(struct drm_device *dev, const char *string) 160profile_find(struct drm_device *dev, const char *string)
212{ 161{
213 struct drm_nouveau_private *dev_priv = dev->dev_private; 162 struct nouveau_pm *pm = nouveau_pm(dev);
214 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
215 struct nouveau_pm_profile *profile; 163 struct nouveau_pm_profile *profile;
216 164
217 list_for_each_entry(profile, &pm->profiles, head) { 165 list_for_each_entry(profile, &pm->profiles, head) {
@@ -225,8 +173,7 @@ profile_find(struct drm_device *dev, const char *string)
225static int 173static int
226nouveau_pm_profile_set(struct drm_device *dev, const char *profile) 174nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
227{ 175{
228 struct drm_nouveau_private *dev_priv = dev->dev_private; 176 struct nouveau_pm *pm = nouveau_pm(dev);
229 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
230 struct nouveau_pm_profile *ac = NULL, *dc = NULL; 177 struct nouveau_pm_profile *ac = NULL, *dc = NULL;
231 char string[16], *cur = string, *ptr; 178 char string[16], *cur = string, *ptr;
232 179
@@ -279,8 +226,9 @@ const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = {
279static int 226static int
280nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl) 227nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
281{ 228{
282 struct drm_nouveau_private *dev_priv = dev->dev_private; 229 struct nouveau_drm *drm = nouveau_drm(dev);
283 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 230 struct nouveau_pm *pm = nouveau_pm(dev);
231 struct nouveau_therm *therm = nouveau_therm(drm->device);
284 int ret; 232 int ret;
285 233
286 memset(perflvl, 0, sizeof(*perflvl)); 234 memset(perflvl, 0, sizeof(*perflvl));
@@ -299,9 +247,11 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
299 } 247 }
300 } 248 }
301 249
302 ret = nouveau_pwmfan_get(dev); 250 if (therm && therm->fan_get) {
303 if (ret > 0) 251 ret = therm->fan_get(therm);
304 perflvl->fanspeed = ret; 252 if (ret >= 0)
253 perflvl->fanspeed = ret;
254 }
305 255
306 nouveau_mem_timing_read(dev, &perflvl->timing); 256 nouveau_mem_timing_read(dev, &perflvl->timing);
307 return 0; 257 return 0;
@@ -362,8 +312,7 @@ static ssize_t
362nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf) 312nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
363{ 313{
364 struct drm_device *dev = pci_get_drvdata(to_pci_dev(d)); 314 struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
365 struct drm_nouveau_private *dev_priv = dev->dev_private; 315 struct nouveau_pm *pm = nouveau_pm(dev);
366 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
367 struct nouveau_pm_level cur; 316 struct nouveau_pm_level cur;
368 int len = PAGE_SIZE, ret; 317 int len = PAGE_SIZE, ret;
369 char *ptr = buf; 318 char *ptr = buf;
@@ -398,8 +347,8 @@ static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR,
398static int 347static int
399nouveau_sysfs_init(struct drm_device *dev) 348nouveau_sysfs_init(struct drm_device *dev)
400{ 349{
401 struct drm_nouveau_private *dev_priv = dev->dev_private; 350 struct nouveau_drm *drm = nouveau_drm(dev);
402 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 351 struct nouveau_pm *pm = nouveau_pm(dev);
403 struct device *d = &dev->pdev->dev; 352 struct device *d = &dev->pdev->dev;
404 int ret, i; 353 int ret, i;
405 354
@@ -418,7 +367,7 @@ nouveau_sysfs_init(struct drm_device *dev)
418 367
419 ret = device_create_file(d, &perflvl->dev_attr); 368 ret = device_create_file(d, &perflvl->dev_attr);
420 if (ret) { 369 if (ret) {
421 NV_ERROR(dev, "failed perflvl %d sysfs: %d\n", 370 NV_ERROR(drm, "failed perflvl %d sysfs: %d\n",
422 perflvl->id, i); 371 perflvl->id, i);
423 perflvl->dev_attr.attr.name = NULL; 372 perflvl->dev_attr.attr.name = NULL;
424 nouveau_pm_fini(dev); 373 nouveau_pm_fini(dev);
@@ -432,8 +381,7 @@ nouveau_sysfs_init(struct drm_device *dev)
432static void 381static void
433nouveau_sysfs_fini(struct drm_device *dev) 382nouveau_sysfs_fini(struct drm_device *dev)
434{ 383{
435 struct drm_nouveau_private *dev_priv = dev->dev_private; 384 struct nouveau_pm *pm = nouveau_pm(dev);
436 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
437 struct device *d = &dev->pdev->dev; 385 struct device *d = &dev->pdev->dev;
438 int i; 386 int i;
439 387
@@ -453,10 +401,10 @@ static ssize_t
453nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) 401nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
454{ 402{
455 struct drm_device *dev = dev_get_drvdata(d); 403 struct drm_device *dev = dev_get_drvdata(d);
456 struct drm_nouveau_private *dev_priv = dev->dev_private; 404 struct nouveau_drm *drm = nouveau_drm(dev);
457 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 405 struct nouveau_therm *therm = nouveau_therm(drm->device);
458 406
459 return snprintf(buf, PAGE_SIZE, "%d\n", pm->temp_get(dev)*1000); 407 return snprintf(buf, PAGE_SIZE, "%d\n", therm->temp_get(therm) * 1000);
460} 408}
461static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, 409static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
462 NULL, 0); 410 NULL, 0);
@@ -465,28 +413,25 @@ static ssize_t
465nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf) 413nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
466{ 414{
467 struct drm_device *dev = dev_get_drvdata(d); 415 struct drm_device *dev = dev_get_drvdata(d);
468 struct drm_nouveau_private *dev_priv = dev->dev_private; 416 struct nouveau_drm *drm = nouveau_drm(dev);
469 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 417 struct nouveau_therm *therm = nouveau_therm(drm->device);
470 struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
471 418
472 return snprintf(buf, PAGE_SIZE, "%d\n", temp->down_clock*1000); 419 return snprintf(buf, PAGE_SIZE, "%d\n",
420 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000);
473} 421}
474static ssize_t 422static ssize_t
475nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a, 423nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
476 const char *buf, size_t count) 424 const char *buf, size_t count)
477{ 425{
478 struct drm_device *dev = dev_get_drvdata(d); 426 struct drm_device *dev = dev_get_drvdata(d);
479 struct drm_nouveau_private *dev_priv = dev->dev_private; 427 struct nouveau_drm *drm = nouveau_drm(dev);
480 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 428 struct nouveau_therm *therm = nouveau_therm(drm->device);
481 struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
482 long value; 429 long value;
483 430
484 if (kstrtol(buf, 10, &value) == -EINVAL) 431 if (kstrtol(buf, 10, &value) == -EINVAL)
485 return count; 432 return count;
486 433
487 temp->down_clock = value/1000; 434 therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK, value / 1000);
488
489 nouveau_temp_safety_checks(dev);
490 435
491 return count; 436 return count;
492} 437}
@@ -499,11 +444,11 @@ nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
499 char *buf) 444 char *buf)
500{ 445{
501 struct drm_device *dev = dev_get_drvdata(d); 446 struct drm_device *dev = dev_get_drvdata(d);
502 struct drm_nouveau_private *dev_priv = dev->dev_private; 447 struct nouveau_drm *drm = nouveau_drm(dev);
503 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 448 struct nouveau_therm *therm = nouveau_therm(drm->device);
504 struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
505 449
506 return snprintf(buf, PAGE_SIZE, "%d\n", temp->critical*1000); 450 return snprintf(buf, PAGE_SIZE, "%d\n",
451 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000);
507} 452}
508static ssize_t 453static ssize_t
509nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a, 454nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
@@ -511,17 +456,14 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
511 size_t count) 456 size_t count)
512{ 457{
513 struct drm_device *dev = dev_get_drvdata(d); 458 struct drm_device *dev = dev_get_drvdata(d);
514 struct drm_nouveau_private *dev_priv = dev->dev_private; 459 struct nouveau_drm *drm = nouveau_drm(dev);
515 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 460 struct nouveau_therm *therm = nouveau_therm(drm->device);
516 struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
517 long value; 461 long value;
518 462
519 if (kstrtol(buf, 10, &value) == -EINVAL) 463 if (kstrtol(buf, 10, &value) == -EINVAL)
520 return count; 464 return count;
521 465
522 temp->critical = value/1000; 466 therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL, value / 1000);
523
524 nouveau_temp_safety_checks(dev);
525 467
526 return count; 468 return count;
527} 469}
@@ -553,47 +495,62 @@ nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
553 char *buf) 495 char *buf)
554{ 496{
555 struct drm_device *dev = dev_get_drvdata(d); 497 struct drm_device *dev = dev_get_drvdata(d);
556 struct drm_nouveau_private *dev_priv = dev->dev_private; 498 struct nouveau_drm *drm = nouveau_drm(dev);
557 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; 499 struct nouveau_therm *therm = nouveau_therm(drm->device);
558 struct gpio_func gpio; 500
559 u32 cycles, cur, prev; 501 return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
560 u64 start; 502}
503static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
504 NULL, 0);
505
506static ssize_t
507nouveau_hwmon_get_pwm1_enable(struct device *d,
508 struct device_attribute *a, char *buf)
509{
510 struct drm_device *dev = dev_get_drvdata(d);
511 struct nouveau_drm *drm = nouveau_drm(dev);
512 struct nouveau_therm *therm = nouveau_therm(drm->device);
561 int ret; 513 int ret;
562 514
563 ret = nouveau_gpio_find(dev, 0, DCB_GPIO_FAN_SENSE, 0xff, &gpio); 515 ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE);
564 if (ret) 516 if (ret < 0)
565 return ret; 517 return ret;
566 518
567 /* Monitor the GPIO input 0x3b for 250ms. 519 return sprintf(buf, "%i\n", ret);
568 * When the fan spins, it changes the value of GPIO FAN_SENSE. 520}
569 * We get 4 changes (0 -> 1 -> 0 -> 1 -> [...]) per complete rotation.
570 */
571 start = ptimer->read(dev);
572 prev = nouveau_gpio_sense(dev, 0, gpio.line);
573 cycles = 0;
574 do {
575 cur = nouveau_gpio_sense(dev, 0, gpio.line);
576 if (prev != cur) {
577 cycles++;
578 prev = cur;
579 }
580 521
581 usleep_range(500, 1000); /* supports 0 < rpm < 7500 */ 522static ssize_t
582 } while (ptimer->read(dev) - start < 250000000); 523nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
524 const char *buf, size_t count)
525{
526 struct drm_device *dev = dev_get_drvdata(d);
527 struct nouveau_drm *drm = nouveau_drm(dev);
528 struct nouveau_therm *therm = nouveau_therm(drm->device);
529 long value;
530 int ret;
531
532 if (kstrtol(buf, 10, &value) == -EINVAL)
533 return -EINVAL;
583 534
584 /* interpolate to get rpm */ 535 ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MODE, value);
585 return sprintf(buf, "%i\n", cycles / 4 * 4 * 60); 536 if (ret)
537 return ret;
538 else
539 return count;
586} 540}
587static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input, 541static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
588 NULL, 0); 542 nouveau_hwmon_get_pwm1_enable,
543 nouveau_hwmon_set_pwm1_enable, 0);
589 544
590static ssize_t 545static ssize_t
591nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf) 546nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
592{ 547{
593 struct drm_device *dev = dev_get_drvdata(d); 548 struct drm_device *dev = dev_get_drvdata(d);
549 struct nouveau_drm *drm = nouveau_drm(dev);
550 struct nouveau_therm *therm = nouveau_therm(drm->device);
594 int ret; 551 int ret;
595 552
596 ret = nouveau_pwmfan_get(dev); 553 ret = therm->fan_get(therm);
597 if (ret < 0) 554 if (ret < 0)
598 return ret; 555 return ret;
599 556
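
The removed fan0_input handler sampled the FAN_SENSE gpio for 250ms and counted level changes; since the fan produces four edges per rotation, the interpolation it performed reduces to the sketch below (hypothetical helper):

static int
fan_rpm_from_edges(u32 edges)	/* edges counted in a 250ms window */
{
	u32 rotations = edges / 4;	/* 4 level changes per rotation */

	return rotations * 4 * 60;	/* x4: per second, x60: per minute */
}
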
@@ -601,12 +558,12 @@ nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf)
601} 558}
602 559
603static ssize_t 560static ssize_t
604nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a, 561nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
605 const char *buf, size_t count) 562 const char *buf, size_t count)
606{ 563{
607 struct drm_device *dev = dev_get_drvdata(d); 564 struct drm_device *dev = dev_get_drvdata(d);
608 struct drm_nouveau_private *dev_priv = dev->dev_private; 565 struct nouveau_drm *drm = nouveau_drm(dev);
609 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 566 struct nouveau_therm *therm = nouveau_therm(drm->device);
610 int ret = -ENODEV; 567 int ret = -ENODEV;
611 long value; 568 long value;
612 569
@@ -616,103 +573,96 @@ nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a,
616 if (kstrtol(buf, 10, &value) == -EINVAL) 573 if (kstrtol(buf, 10, &value) == -EINVAL)
617 return -EINVAL; 574 return -EINVAL;
618 575
619 if (value < pm->fan.min_duty) 576 ret = therm->fan_set(therm, value);
620 value = pm->fan.min_duty;
621 if (value > pm->fan.max_duty)
622 value = pm->fan.max_duty;
623
624 ret = nouveau_pwmfan_set(dev, value);
625 if (ret) 577 if (ret)
626 return ret; 578 return ret;
627 579
628 return count; 580 return count;
629} 581}
630 582
631static SENSOR_DEVICE_ATTR(pwm0, S_IRUGO | S_IWUSR, 583static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
632 nouveau_hwmon_get_pwm0, 584 nouveau_hwmon_get_pwm1,
633 nouveau_hwmon_set_pwm0, 0); 585 nouveau_hwmon_set_pwm1, 0);
634 586
635static ssize_t 587static ssize_t
636nouveau_hwmon_get_pwm0_min(struct device *d, 588nouveau_hwmon_get_pwm1_min(struct device *d,
637 struct device_attribute *a, char *buf) 589 struct device_attribute *a, char *buf)
638{ 590{
639 struct drm_device *dev = dev_get_drvdata(d); 591 struct drm_device *dev = dev_get_drvdata(d);
640 struct drm_nouveau_private *dev_priv = dev->dev_private; 592 struct nouveau_drm *drm = nouveau_drm(dev);
641 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 593 struct nouveau_therm *therm = nouveau_therm(drm->device);
594 int ret;
595
596 ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY);
597 if (ret < 0)
598 return ret;
642 599
643 return sprintf(buf, "%i\n", pm->fan.min_duty); 600 return sprintf(buf, "%i\n", ret);
644} 601}
645 602
646static ssize_t 603static ssize_t
647nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a, 604nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
648 const char *buf, size_t count) 605 const char *buf, size_t count)
649{ 606{
650 struct drm_device *dev = dev_get_drvdata(d); 607 struct drm_device *dev = dev_get_drvdata(d);
651 struct drm_nouveau_private *dev_priv = dev->dev_private; 608 struct nouveau_drm *drm = nouveau_drm(dev);
652 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 609 struct nouveau_therm *therm = nouveau_therm(drm->device);
653 long value; 610 long value;
611 int ret;
654 612
655 if (kstrtol(buf, 10, &value) == -EINVAL) 613 if (kstrtol(buf, 10, &value) == -EINVAL)
656 return -EINVAL; 614 return -EINVAL;
657 615
658 if (value < 0) 616 ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY, value);
659 value = 0; 617 if (ret < 0)
660 618 return ret;
661 if (pm->fan.max_duty - value < 10)
662 value = pm->fan.max_duty - 10;
663
664 if (value < 10)
665 pm->fan.min_duty = 10;
666 else
667 pm->fan.min_duty = value;
668 619
669 return count; 620 return count;
670} 621}
671 622
672static SENSOR_DEVICE_ATTR(pwm0_min, S_IRUGO | S_IWUSR, 623static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO | S_IWUSR,
673 nouveau_hwmon_get_pwm0_min, 624 nouveau_hwmon_get_pwm1_min,
674 nouveau_hwmon_set_pwm0_min, 0); 625 nouveau_hwmon_set_pwm1_min, 0);
675 626
676static ssize_t 627static ssize_t
677nouveau_hwmon_get_pwm0_max(struct device *d, 628nouveau_hwmon_get_pwm1_max(struct device *d,
678 struct device_attribute *a, char *buf) 629 struct device_attribute *a, char *buf)
679{ 630{
680 struct drm_device *dev = dev_get_drvdata(d); 631 struct drm_device *dev = dev_get_drvdata(d);
681 struct drm_nouveau_private *dev_priv = dev->dev_private; 632 struct nouveau_drm *drm = nouveau_drm(dev);
682 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 633 struct nouveau_therm *therm = nouveau_therm(drm->device);
634 int ret;
635
636 ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY);
637 if (ret < 0)
638 return ret;
683 639
684 return sprintf(buf, "%i\n", pm->fan.max_duty); 640 return sprintf(buf, "%i\n", ret);
685} 641}
686 642
687static ssize_t 643static ssize_t
688nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a, 644nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
689 const char *buf, size_t count) 645 const char *buf, size_t count)
690{ 646{
691 struct drm_device *dev = dev_get_drvdata(d); 647 struct drm_device *dev = dev_get_drvdata(d);
692 struct drm_nouveau_private *dev_priv = dev->dev_private; 648 struct nouveau_drm *drm = nouveau_drm(dev);
693 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 649 struct nouveau_therm *therm = nouveau_therm(drm->device);
694 long value; 650 long value;
651 int ret;
695 652
696 if (kstrtol(buf, 10, &value) == -EINVAL) 653 if (kstrtol(buf, 10, &value) == -EINVAL)
697 return -EINVAL; 654 return -EINVAL;
698 655
699 if (value < 0) 656 ret = therm->attr_set(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY, value);
700 value = 0; 657 if (ret < 0)
701 658 return ret;
702 if (value - pm->fan.min_duty < 10)
703 value = pm->fan.min_duty + 10;
704
705 if (value > 100)
706 pm->fan.max_duty = 100;
707 else
708 pm->fan.max_duty = value;
709 659
710 return count; 660 return count;
711} 661}
712 662
713static SENSOR_DEVICE_ATTR(pwm0_max, S_IRUGO | S_IWUSR, 663static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
714 nouveau_hwmon_get_pwm0_max, 664 nouveau_hwmon_get_pwm1_max,
715 nouveau_hwmon_set_pwm0_max, 0); 665 nouveau_hwmon_set_pwm1_max, 0);
716 666
717static struct attribute *hwmon_attributes[] = { 667static struct attribute *hwmon_attributes[] = {
718 &sensor_dev_attr_temp1_input.dev_attr.attr, 668 &sensor_dev_attr_temp1_input.dev_attr.attr,
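
Two things happen across the hunk above: the attributes migrate from pwm0* to pwm1*, matching the hwmon sysfs ABI in which channels are numbered from 1, and the open-coded duty clamping moves behind therm->attr_set(). For reference, the clamping the old min-duty handler applied was equivalent to:

static long
clamp_min_duty(long value, long max_duty)
{
	if (value < 0)
		value = 0;
	if (max_duty - value < 10)
		value = max_duty - 10;	/* keep min at least 10% below max */
	if (value < 10)
		value = 10;		/* never allow a floor under 10% */

	return value;
}
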
@@ -727,9 +677,10 @@ static struct attribute *hwmon_fan_rpm_attributes[] = {
727 NULL 677 NULL
728}; 678};
729static struct attribute *hwmon_pwm_fan_attributes[] = { 679static struct attribute *hwmon_pwm_fan_attributes[] = {
730 &sensor_dev_attr_pwm0.dev_attr.attr, 680 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
731 &sensor_dev_attr_pwm0_min.dev_attr.attr, 681 &sensor_dev_attr_pwm1.dev_attr.attr,
732 &sensor_dev_attr_pwm0_max.dev_attr.attr, 682 &sensor_dev_attr_pwm1_min.dev_attr.attr,
683 &sensor_dev_attr_pwm1_max.dev_attr.attr,
733 NULL 684 NULL
734}; 685};
735 686
@@ -747,20 +698,22 @@ static const struct attribute_group hwmon_pwm_fan_attrgroup = {
747static int 698static int
748nouveau_hwmon_init(struct drm_device *dev) 699nouveau_hwmon_init(struct drm_device *dev)
749{ 700{
750 struct drm_nouveau_private *dev_priv = dev->dev_private; 701 struct nouveau_pm *pm = nouveau_pm(dev);
751 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 702 struct nouveau_drm *drm = nouveau_drm(dev);
703 struct nouveau_therm *therm = nouveau_therm(drm->device);
704
752#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 705#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
753 struct device *hwmon_dev; 706 struct device *hwmon_dev;
754 int ret = 0; 707 int ret = 0;
755 708
756 if (!pm->temp_get) 709 if (!therm || !therm->temp_get || !therm->attr_get ||
710 !therm->attr_set || therm->temp_get(therm) < 0)
757 return -ENODEV; 711 return -ENODEV;
758 712
759 hwmon_dev = hwmon_device_register(&dev->pdev->dev); 713 hwmon_dev = hwmon_device_register(&dev->pdev->dev);
760 if (IS_ERR(hwmon_dev)) { 714 if (IS_ERR(hwmon_dev)) {
761 ret = PTR_ERR(hwmon_dev); 715 ret = PTR_ERR(hwmon_dev);
762 NV_ERROR(dev, 716 NV_ERROR(drm, "Unable to register hwmon device: %d\n", ret);
763 "Unable to register hwmon device: %d\n", ret);
764 return ret; 717 return ret;
765 } 718 }
766 dev_set_drvdata(hwmon_dev, dev); 719 dev_set_drvdata(hwmon_dev, dev);
@@ -776,7 +729,7 @@ nouveau_hwmon_init(struct drm_device *dev)
776 /*XXX: incorrect, need better detection for this, some boards have 729 /*XXX: incorrect, need better detection for this, some boards have
777 * the gpio entries for pwm fan control even when there's no 730 * the gpio entries for pwm fan control even when there's no
778 * actual fan connected to it... therm table? */ 731 * actual fan connected to it... therm table? */
779 if (nouveau_pwmfan_get(dev) >= 0) { 732 if (therm->fan_get && therm->fan_get(therm) >= 0) {
780 ret = sysfs_create_group(&dev->pdev->dev.kobj, 733 ret = sysfs_create_group(&dev->pdev->dev.kobj,
781 &hwmon_pwm_fan_attrgroup); 734 &hwmon_pwm_fan_attrgroup);
782 if (ret) 735 if (ret)
@@ -784,7 +737,7 @@ nouveau_hwmon_init(struct drm_device *dev)
784 } 737 }
785 738
786 /* if the card can read the fan rpm */ 739 /* if the card can read the fan rpm */
787 if (nouveau_gpio_func_valid(dev, DCB_GPIO_FAN_SENSE)) { 740 if (therm->fan_sense(therm) >= 0) {
788 ret = sysfs_create_group(&dev->pdev->dev.kobj, 741 ret = sysfs_create_group(&dev->pdev->dev.kobj,
789 &hwmon_fan_rpm_attrgroup); 742 &hwmon_fan_rpm_attrgroup);
790 if (ret) 743 if (ret)
@@ -796,7 +749,7 @@ nouveau_hwmon_init(struct drm_device *dev)
796 return 0; 749 return 0;
797 750
798error: 751error:
799 NV_ERROR(dev, "Unable to create some hwmon sysfs files: %d\n", ret); 752 NV_ERROR(drm, "Unable to create some hwmon sysfs files: %d\n", ret);
800 hwmon_device_unregister(hwmon_dev); 753 hwmon_device_unregister(hwmon_dev);
801 pm->hwmon = NULL; 754 pm->hwmon = NULL;
802 return ret; 755 return ret;
@@ -810,8 +763,7 @@ static void
810nouveau_hwmon_fini(struct drm_device *dev) 763nouveau_hwmon_fini(struct drm_device *dev)
811{ 764{
812#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 765#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
813 struct drm_nouveau_private *dev_priv = dev->dev_private; 766 struct nouveau_pm *pm = nouveau_pm(dev);
814 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
815 767
816 if (pm->hwmon) { 768 if (pm->hwmon) {
817 sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); 769 sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
@@ -829,16 +781,15 @@ nouveau_hwmon_fini(struct drm_device *dev)
829static int 781static int
830nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data) 782nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
831{ 783{
832 struct drm_nouveau_private *dev_priv = 784 struct nouveau_pm *pm = container_of(nb, struct nouveau_pm, acpi_nb);
833 container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb); 785 struct nouveau_drm *drm = nouveau_drm(pm->dev);
834 struct drm_device *dev = dev_priv->dev;
835 struct acpi_bus_event *entry = (struct acpi_bus_event *)data; 786 struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
836 787
837 if (strcmp(entry->device_class, "ac_adapter") == 0) { 788 if (strcmp(entry->device_class, "ac_adapter") == 0) {
838 bool ac = power_supply_is_system_supplied(); 789 bool ac = power_supply_is_system_supplied();
839 790
840 NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC"); 791 NV_DEBUG(drm, "power supply changed: %s\n", ac ? "AC" : "DC");
841 nouveau_pm_trigger(dev); 792 nouveau_pm_trigger(pm->dev);
842 } 793 }
843 794
844 return NOTIFY_OK; 795 return NOTIFY_OK;
@@ -848,19 +799,67 @@ nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
848int 799int
849nouveau_pm_init(struct drm_device *dev) 800nouveau_pm_init(struct drm_device *dev)
850{ 801{
851 struct drm_nouveau_private *dev_priv = dev->dev_private; 802 struct nouveau_device *device = nouveau_dev(dev);
852 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 803 struct nouveau_drm *drm = nouveau_drm(dev);
804 struct nouveau_pm *pm;
853 char info[256]; 805 char info[256];
854 int ret, i; 806 int ret, i;
855 807
808 pm = drm->pm = kzalloc(sizeof(*pm), GFP_KERNEL);
809 if (!pm)
810 return -ENOMEM;
811
812 pm->dev = dev;
813
814 if (device->card_type < NV_40) {
815 pm->clocks_get = nv04_pm_clocks_get;
816 pm->clocks_pre = nv04_pm_clocks_pre;
817 pm->clocks_set = nv04_pm_clocks_set;
818 if (nouveau_gpio(drm->device)) {
819 pm->voltage_get = nouveau_voltage_gpio_get;
820 pm->voltage_set = nouveau_voltage_gpio_set;
821 }
822 } else
823 if (device->card_type < NV_50) {
824 pm->clocks_get = nv40_pm_clocks_get;
825 pm->clocks_pre = nv40_pm_clocks_pre;
826 pm->clocks_set = nv40_pm_clocks_set;
827 pm->voltage_get = nouveau_voltage_gpio_get;
828 pm->voltage_set = nouveau_voltage_gpio_set;
829 } else
830 if (device->card_type < NV_C0) {
831 if (device->chipset < 0xa3 ||
832 device->chipset == 0xaa ||
833 device->chipset == 0xac) {
834 pm->clocks_get = nv50_pm_clocks_get;
835 pm->clocks_pre = nv50_pm_clocks_pre;
836 pm->clocks_set = nv50_pm_clocks_set;
837 } else {
838 pm->clocks_get = nva3_pm_clocks_get;
839 pm->clocks_pre = nva3_pm_clocks_pre;
840 pm->clocks_set = nva3_pm_clocks_set;
841 }
842 pm->voltage_get = nouveau_voltage_gpio_get;
843 pm->voltage_set = nouveau_voltage_gpio_set;
844 } else
845 if (device->card_type < NV_E0) {
846 pm->clocks_get = nvc0_pm_clocks_get;
847 pm->clocks_pre = nvc0_pm_clocks_pre;
848 pm->clocks_set = nvc0_pm_clocks_set;
849 pm->voltage_get = nouveau_voltage_gpio_get;
850 pm->voltage_set = nouveau_voltage_gpio_set;
851 }
852
853
856 /* parse aux tables from vbios */ 854 /* parse aux tables from vbios */
857 nouveau_volt_init(dev); 855 nouveau_volt_init(dev);
858 nouveau_temp_init(dev); 856
857 INIT_LIST_HEAD(&pm->profiles);
859 858
860 /* determine current ("boot") performance level */ 859 /* determine current ("boot") performance level */
861 ret = nouveau_pm_perflvl_get(dev, &pm->boot); 860 ret = nouveau_pm_perflvl_get(dev, &pm->boot);
862 if (ret) { 861 if (ret) {
863 NV_ERROR(dev, "failed to determine boot perflvl\n"); 862 NV_ERROR(drm, "failed to determine boot perflvl\n");
864 return ret; 863 return ret;
865 } 864 }
866 865
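
The ladder above binds one clocks_get/clocks_pre/clocks_set triple per GPU generation. Judging by the callback signatures declared in nouveau_pm.h (below), the three form a two-phase reclock: clocks_pre() computes an opaque state object off-line and clocks_set() commits it. A consumer would look roughly like this sketch (the ERR_PTR convention for clocks_pre() failures is an assumption):

static int
example_reclock(struct drm_device *dev, struct nouveau_pm *pm,
		struct nouveau_pm_level *perflvl)
{
	void *state;

	state = pm->clocks_pre(dev, perflvl);	/* build programming state */
	if (IS_ERR(state))
		return PTR_ERR(state);

	return pm->clocks_set(dev, state);	/* commit it to hardware */
}
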
@@ -868,7 +867,6 @@ nouveau_pm_init(struct drm_device *dev)
868 strncpy(pm->boot.profile.name, "boot", 4); 867 strncpy(pm->boot.profile.name, "boot", 4);
869 pm->boot.profile.func = &nouveau_pm_static_profile_func; 868 pm->boot.profile.func = &nouveau_pm_static_profile_func;
870 869
871 INIT_LIST_HEAD(&pm->profiles);
872 list_add(&pm->boot.profile.head, &pm->profiles); 870 list_add(&pm->boot.profile.head, &pm->profiles);
873 871
874 pm->profile_ac = &pm->boot.profile; 872 pm->profile_ac = &pm->boot.profile;
@@ -880,22 +878,19 @@ nouveau_pm_init(struct drm_device *dev)
880 nouveau_perf_init(dev); 878 nouveau_perf_init(dev);
881 879
882 /* display available performance levels */ 880 /* display available performance levels */
883 NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl); 881 NV_INFO(drm, "%d available performance level(s)\n", pm->nr_perflvl);
884 for (i = 0; i < pm->nr_perflvl; i++) { 882 for (i = 0; i < pm->nr_perflvl; i++) {
885 nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info)); 883 nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
886 NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info); 884 NV_INFO(drm, "%d:%s", pm->perflvl[i].id, info);
887 } 885 }
888 886
889 nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info)); 887 nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
890 NV_INFO(dev, "c:%s", info); 888 NV_INFO(drm, "c:%s", info);
891 889
892 /* switch performance levels now if requested */ 890 /* switch performance levels now if requested */
893 if (nouveau_perflvl != NULL) 891 if (nouveau_perflvl != NULL)
894 nouveau_pm_profile_set(dev, nouveau_perflvl); 892 nouveau_pm_profile_set(dev, nouveau_perflvl);
895 893
896 /* determine the current fan speed */
897 pm->fan.percent = nouveau_pwmfan_get(dev);
898
899 nouveau_sysfs_init(dev); 894 nouveau_sysfs_init(dev);
900 nouveau_hwmon_init(dev); 895 nouveau_hwmon_init(dev);
901#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY) 896#if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
@@ -909,8 +904,7 @@ nouveau_pm_init(struct drm_device *dev)
909void 904void
910nouveau_pm_fini(struct drm_device *dev) 905nouveau_pm_fini(struct drm_device *dev)
911{ 906{
912 struct drm_nouveau_private *dev_priv = dev->dev_private; 907 struct nouveau_pm *pm = nouveau_pm(dev);
913 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
914 struct nouveau_pm_profile *profile, *tmp; 908 struct nouveau_pm_profile *profile, *tmp;
915 909
916 list_for_each_entry_safe(profile, tmp, &pm->profiles, head) { 910 list_for_each_entry_safe(profile, tmp, &pm->profiles, head) {
@@ -921,7 +915,6 @@ nouveau_pm_fini(struct drm_device *dev)
921 if (pm->cur != &pm->boot) 915 if (pm->cur != &pm->boot)
922 nouveau_pm_perflvl_set(dev, &pm->boot); 916 nouveau_pm_perflvl_set(dev, &pm->boot);
923 917
924 nouveau_temp_fini(dev);
925 nouveau_perf_fini(dev); 918 nouveau_perf_fini(dev);
926 nouveau_volt_fini(dev); 919 nouveau_volt_fini(dev);
927 920
@@ -930,13 +923,15 @@ nouveau_pm_fini(struct drm_device *dev)
930#endif 923#endif
931 nouveau_hwmon_fini(dev); 924 nouveau_hwmon_fini(dev);
932 nouveau_sysfs_fini(dev); 925 nouveau_sysfs_fini(dev);
926
927 nouveau_drm(dev)->pm = NULL;
928 kfree(pm);
933} 929}
934 930
935void 931void
936nouveau_pm_resume(struct drm_device *dev) 932nouveau_pm_resume(struct drm_device *dev)
937{ 933{
938 struct drm_nouveau_private *dev_priv = dev->dev_private; 934 struct nouveau_pm *pm = nouveau_pm(dev);
939 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
940 struct nouveau_pm_level *perflvl; 935 struct nouveau_pm_level *perflvl;
941 936
942 if (!pm->cur || pm->cur == &pm->boot) 937 if (!pm->cur || pm->cur == &pm->boot)
@@ -945,5 +940,4 @@ nouveau_pm_resume(struct drm_device *dev)
945 perflvl = pm->cur; 940 perflvl = pm->cur;
946 pm->cur = &pm->boot; 941 pm->cur = &pm->boot;
947 nouveau_pm_perflvl_set(dev, perflvl); 942 nouveau_pm_perflvl_set(dev, perflvl);
948 nouveau_pwmfan_set(dev, pm->fan.percent);
949} 943}
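
Net effect on this file: every fan and temperature operation now goes through the therm subdev's method table instead of pm->pwm_get/pwm_set and friends. Reconstructed purely from the call sites above (the authoritative definition lives in <subdev/therm.h>; enum ordering and values here are illustrative), the interface consumed is approximately:

enum nouveau_therm_attr_type {
	NOUVEAU_THERM_ATTR_FAN_MIN_DUTY,
	NOUVEAU_THERM_ATTR_FAN_MAX_DUTY,
	NOUVEAU_THERM_ATTR_FAN_MODE,
	NOUVEAU_THERM_ATTR_THRS_DOWN_CLK,
	NOUVEAU_THERM_ATTR_THRS_CRITICAL,
};

struct nouveau_therm {
	int (*temp_get)(struct nouveau_therm *);	/* degrees C, <0 on error */
	int (*fan_get)(struct nouveau_therm *);		/* duty in %, <0 on error */
	int (*fan_set)(struct nouveau_therm *, int percent);
	int (*fan_sense)(struct nouveau_therm *);	/* RPM, <0 on error */
	int (*attr_get)(struct nouveau_therm *, enum nouveau_therm_attr_type);
	int (*attr_set)(struct nouveau_therm *,
			enum nouveau_therm_attr_type, int value);
};
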
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 07cac72c72b4..73b789c230a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -25,6 +25,165 @@
25#ifndef __NOUVEAU_PM_H__ 25#ifndef __NOUVEAU_PM_H__
26#define __NOUVEAU_PM_H__ 26#define __NOUVEAU_PM_H__
27 27
28#include <subdev/bios/pll.h>
29#include <subdev/clock.h>
30
31struct nouveau_pm_voltage_level {
32 u32 voltage; /* microvolts */
33 u8 vid;
34};
35
36struct nouveau_pm_voltage {
37 bool supported;
38 u8 version;
39 u8 vid_mask;
40
41 struct nouveau_pm_voltage_level *level;
42 int nr_level;
43};
44
45/* Exclusive upper limits */
46#define NV_MEM_CL_DDR2_MAX 8
47#define NV_MEM_WR_DDR2_MAX 9
48#define NV_MEM_CL_DDR3_MAX 17
49#define NV_MEM_WR_DDR3_MAX 17
50#define NV_MEM_CL_GDDR3_MAX 16
51#define NV_MEM_WR_GDDR3_MAX 18
52#define NV_MEM_CL_GDDR5_MAX 21
53#define NV_MEM_WR_GDDR5_MAX 20
54
55struct nouveau_pm_memtiming {
56 int id;
57
58 u32 reg[9];
59 u32 mr[4];
60
61 u8 tCWL;
62
63 u8 odt;
64 u8 drive_strength;
65};
66
67struct nouveau_pm_tbl_header {
68 u8 version;
69 u8 header_len;
70 u8 entry_cnt;
71 u8 entry_len;
72};
73
74struct nouveau_pm_tbl_entry {
75 u8 tWR;
76 u8 tWTR;
77 u8 tCL;
78 u8 tRC;
79 u8 empty_4;
80 u8 tRFC; /* Byte 5 */
81 u8 empty_6;
82 u8 tRAS; /* Byte 7 */
83 u8 empty_8;
84 u8 tRP; /* Byte 9 */
85 u8 tRCDRD;
86 u8 tRCDWR;
87 u8 tRRD;
88 u8 tUNK_13;
89 u8 RAM_FT1; /* 14, a bitmask of random RAM features */
90 u8 empty_15;
91 u8 tUNK_16;
92 u8 empty_17;
93 u8 tUNK_18;
94 u8 tCWL;
95 u8 tUNK_20, tUNK_21;
96};
97
98struct nouveau_pm_profile;
99struct nouveau_pm_profile_func {
100 void (*destroy)(struct nouveau_pm_profile *);
101 void (*init)(struct nouveau_pm_profile *);
102 void (*fini)(struct nouveau_pm_profile *);
103 struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
104};
105
106struct nouveau_pm_profile {
107 const struct nouveau_pm_profile_func *func;
108 struct list_head head;
109 char name[8];
110};
111
112#define NOUVEAU_PM_MAX_LEVEL 8
113struct nouveau_pm_level {
114 struct nouveau_pm_profile profile;
115 struct device_attribute dev_attr;
116 char name[32];
117 int id;
118
119 struct nouveau_pm_memtiming timing;
120 u32 memory;
121 u16 memscript;
122
123 u32 core;
124 u32 shader;
125 u32 rop;
126 u32 copy;
127 u32 daemon;
128 u32 vdec;
129 u32 dom6;
130 u32 unka0; /* nva3:nvc0 */
131 u32 hub01; /* nvc0- */
132 u32 hub06; /* nvc0- */
133 u32 hub07; /* nvc0- */
134
135 u32 volt_min; /* microvolts */
136 u32 volt_max;
137 u8 fanspeed;
138};
139
140struct nouveau_pm_temp_sensor_constants {
141 u16 offset_constant;
142 s16 offset_mult;
143 s16 offset_div;
144 s16 slope_mult;
145 s16 slope_div;
146};
147
148struct nouveau_pm_threshold_temp {
149 s16 critical;
150 s16 down_clock;
151};
152
153struct nouveau_pm {
154 struct drm_device *dev;
155
156 struct nouveau_pm_voltage voltage;
157 struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
158 int nr_perflvl;
159 struct nouveau_pm_temp_sensor_constants sensor_constants;
160 struct nouveau_pm_threshold_temp threshold_temp;
161
162 struct nouveau_pm_profile *profile_ac;
163 struct nouveau_pm_profile *profile_dc;
164 struct nouveau_pm_profile *profile;
165 struct list_head profiles;
166
167 struct nouveau_pm_level boot;
168 struct nouveau_pm_level *cur;
169
170 struct device *hwmon;
171 struct notifier_block acpi_nb;
172
173 int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
174 void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
175 int (*clocks_set)(struct drm_device *, void *);
176
177 int (*voltage_get)(struct drm_device *);
178 int (*voltage_set)(struct drm_device *, int voltage);
179};
180
181static inline struct nouveau_pm *
182nouveau_pm(struct drm_device *dev)
183{
184 return nouveau_drm(dev)->pm;
185}
186
28struct nouveau_mem_exec_func { 187struct nouveau_mem_exec_func {
29 struct drm_device *dev; 188 struct drm_device *dev;
30 void (*precharge)(struct nouveau_mem_exec_func *); 189 void (*precharge)(struct nouveau_mem_exec_func *);
@@ -99,11 +258,26 @@ int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
99void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *); 258void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
100int nvc0_pm_clocks_set(struct drm_device *, void *); 259int nvc0_pm_clocks_set(struct drm_device *, void *);
101 260
102/* nouveau_temp.c */ 261/* nouveau_mem.c */
103void nouveau_temp_init(struct drm_device *dev); 262int nouveau_mem_timing_calc(struct drm_device *, u32 freq,
104void nouveau_temp_fini(struct drm_device *dev); 263 struct nouveau_pm_memtiming *);
105void nouveau_temp_safety_checks(struct drm_device *dev); 264void nouveau_mem_timing_read(struct drm_device *,
106int nv40_temp_get(struct drm_device *dev); 265 struct nouveau_pm_memtiming *);
107int nv84_temp_get(struct drm_device *dev); 266
267static inline int
268nva3_calc_pll(struct drm_device *dev, struct nvbios_pll *pll, u32 freq,
269 int *N, int *fN, int *M, int *P)
270{
271 struct nouveau_device *device = nouveau_dev(dev);
272 struct nouveau_clock *clk = nouveau_clock(device);
273 struct nouveau_pll_vals pv;
274 int ret;
275
276 ret = clk->pll_calc(clk, pll, freq, &pv);
277 *N = pv.N1;
278 *M = pv.M1;
279 *P = pv.log2P;
280 return ret;
281}
108 282
109#endif 283#endif
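
One caveat in the new nva3_calc_pll() inline above: it copies N1, M1 and log2P out of the pll_calc() result but never writes *fN, so a caller that consumes fN must initialise it first. A hypothetical caller:

static int
example_pll_coeffs(struct drm_device *dev, struct nvbios_pll *pll, u32 khz)
{
	int N, fN = 0, M, P;	/* fN pre-set: the inline won't touch it */
	int ret;

	ret = nva3_calc_pll(dev, pll, khz, &N, &fN, &M, &P);
	if (ret < 0)
		return ret;	/* assume pll_calc() signals failure negatively */

	/* program N/M/P into the PLL registers here */
	return 0;
}
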
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index aef7181415a8..366462cf8a2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -22,13 +22,12 @@
22 * Authors: Dave Airlie 22 * Authors: Dave Airlie
23 */ 23 */
24 24
25#include <drm/drmP.h> 25#include <linux/dma-buf.h>
26 26
27#include "nouveau_drv.h" 27#include <drm/drmP.h>
28#include <drm/nouveau_drm.h>
29#include "nouveau_dma.h"
30 28
31#include <linux/dma-buf.h> 29#include "nouveau_drm.h"
30#include "nouveau_gem.h"
32 31
33static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment, 32static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
34 enum dma_data_direction dir) 33 enum dma_data_direction dir)
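
nouveau_gem_map_dma_buf() above is the exporter's map_dma_buf hook from dma_buf_ops: it hands the importer an sg_table describing the object's backing pages. The general shape of such a hook in this kernel generation, stubbed and with the other mandatory ops omitted:

#include <linux/dma-buf.h>
#include <linux/err.h>

static struct sg_table *
example_map_dma_buf(struct dma_buf_attachment *attachment,
		    enum dma_data_direction dir)
{
	/* a real exporter allocates an sg_table covering the buffer's
	 * pages and maps it for attachment->dev before returning it */
	return ERR_PTR(-ENODEV);	/* stub */
}

static const struct dma_buf_ops example_dmabuf_ops = {
	.map_dma_buf = example_map_dma_buf,
};
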
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
deleted file mode 100644
index 0ebb62f1fc80..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ /dev/null
@@ -1,309 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26
27#include "nouveau_drv.h"
28#include "nouveau_ramht.h"
29
30static u32
31nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
32{
33 struct drm_device *dev = chan->dev;
34 struct drm_nouveau_private *dev_priv = dev->dev_private;
35 struct nouveau_ramht *ramht = chan->ramht;
36 u32 hash = 0;
37 int i;
38
39 NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);
40
41 for (i = 32; i > 0; i -= ramht->bits) {
42 hash ^= (handle & ((1 << ramht->bits) - 1));
43 handle >>= ramht->bits;
44 }
45
46 if (dev_priv->card_type < NV_50)
47 hash ^= chan->id << (ramht->bits - 4);
48 hash <<= 3;
49
50 NV_DEBUG(dev, "hash=0x%08x\n", hash);
51 return hash;
52}
53
54static int
55nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
56 u32 offset)
57{
58 struct drm_nouveau_private *dev_priv = dev->dev_private;
59 u32 ctx = nv_ro32(ramht, offset + 4);
60
61 if (dev_priv->card_type < NV_40)
62 return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
63 return (ctx != 0);
64}
65
66static int
67nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
68 struct nouveau_gpuobj *ramht, u32 offset)
69{
70 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
71 u32 ctx = nv_ro32(ramht, offset + 4);
72
73 if (dev_priv->card_type >= NV_50)
74 return true;
75 else if (dev_priv->card_type >= NV_40)
76 return chan->id ==
77 ((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
78 else
79 return chan->id ==
80 ((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
81}
82
83int
84nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
85 struct nouveau_gpuobj *gpuobj)
86{
87 struct drm_device *dev = chan->dev;
88 struct drm_nouveau_private *dev_priv = dev->dev_private;
89 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
90 struct nouveau_ramht_entry *entry;
91 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
92 unsigned long flags;
93 u32 ctx, co, ho;
94
95 if (nouveau_ramht_find(chan, handle))
96 return -EEXIST;
97
98 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
99 if (!entry)
100 return -ENOMEM;
101 entry->channel = chan;
102 entry->gpuobj = NULL;
103 entry->handle = handle;
104 nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
105
106 if (dev_priv->card_type < NV_40) {
107 ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
108 (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
109 (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
110 } else
111 if (dev_priv->card_type < NV_50) {
112 ctx = (gpuobj->pinst >> 4) |
113 (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
114 (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
115 } else {
116 if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
117 ctx = (gpuobj->cinst << 10) |
118 (chan->id << 28) |
119 chan->id; /* HASH_TAG */
120 } else {
121 ctx = (gpuobj->cinst >> 4) |
122 ((gpuobj->engine <<
123 NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
124 }
125 }
126
127 spin_lock_irqsave(&chan->ramht->lock, flags);
128 list_add(&entry->head, &chan->ramht->entries);
129
130 co = ho = nouveau_ramht_hash_handle(chan, handle);
131 do {
132 if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
133 NV_DEBUG(dev,
134 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
135 chan->id, co, handle, ctx);
136 nv_wo32(ramht, co + 0, handle);
137 nv_wo32(ramht, co + 4, ctx);
138
139 spin_unlock_irqrestore(&chan->ramht->lock, flags);
140 instmem->flush(dev);
141 return 0;
142 }
143 NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
144 chan->id, co, nv_ro32(ramht, co));
145
146 co += 8;
147 if (co >= ramht->size)
148 co = 0;
149 } while (co != ho);
150
151 NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
152 list_del(&entry->head);
153 spin_unlock_irqrestore(&chan->ramht->lock, flags);
154 kfree(entry);
155 return -ENOMEM;
156}
157
158static struct nouveau_ramht_entry *
159nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle)
160{
161 struct nouveau_ramht *ramht = chan ? chan->ramht : NULL;
162 struct nouveau_ramht_entry *entry;
163 unsigned long flags;
164
165 if (!ramht)
166 return NULL;
167
168 spin_lock_irqsave(&ramht->lock, flags);
169 list_for_each_entry(entry, &ramht->entries, head) {
170 if (entry->channel == chan &&
171 (!handle || entry->handle == handle)) {
172 list_del(&entry->head);
173 spin_unlock_irqrestore(&ramht->lock, flags);
174
175 return entry;
176 }
177 }
178 spin_unlock_irqrestore(&ramht->lock, flags);
179
180 return NULL;
181}
182
183static void
184nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
185{
186 struct drm_device *dev = chan->dev;
187 struct drm_nouveau_private *dev_priv = dev->dev_private;
188 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
189 struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
190 unsigned long flags;
191 u32 co, ho;
192
193 spin_lock_irqsave(&chan->ramht->lock, flags);
194 co = ho = nouveau_ramht_hash_handle(chan, handle);
195 do {
196 if (nouveau_ramht_entry_valid(dev, ramht, co) &&
197 nouveau_ramht_entry_same_channel(chan, ramht, co) &&
198 (handle == nv_ro32(ramht, co))) {
199 NV_DEBUG(dev,
200 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
201 chan->id, co, handle, nv_ro32(ramht, co + 4));
202 nv_wo32(ramht, co + 0, 0x00000000);
203 nv_wo32(ramht, co + 4, 0x00000000);
204 instmem->flush(dev);
205 goto out;
206 }
207
208 co += 8;
209 if (co >= ramht->size)
210 co = 0;
211 } while (co != ho);
212
213 NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
214 chan->id, handle);
215out:
216 spin_unlock_irqrestore(&chan->ramht->lock, flags);
217}
218
219int
220nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
221{
222 struct nouveau_ramht_entry *entry;
223
224 entry = nouveau_ramht_remove_entry(chan, handle);
225 if (!entry)
226 return -ENOENT;
227
228 nouveau_ramht_remove_hash(chan, entry->handle);
229 nouveau_gpuobj_ref(NULL, &entry->gpuobj);
230 kfree(entry);
231 return 0;
232}
233
234struct nouveau_gpuobj *
235nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
236{
237 struct nouveau_ramht *ramht = chan->ramht;
238 struct nouveau_ramht_entry *entry;
239 struct nouveau_gpuobj *gpuobj = NULL;
240 unsigned long flags;
241
242 if (unlikely(!chan->ramht))
243 return NULL;
244
245 spin_lock_irqsave(&ramht->lock, flags);
246 list_for_each_entry(entry, &chan->ramht->entries, head) {
247 if (entry->channel == chan && entry->handle == handle) {
248 gpuobj = entry->gpuobj;
249 break;
250 }
251 }
252 spin_unlock_irqrestore(&ramht->lock, flags);
253
254 return gpuobj;
255}
256
257int
258nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
259 struct nouveau_ramht **pramht)
260{
261 struct nouveau_ramht *ramht;
262
263 ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
264 if (!ramht)
265 return -ENOMEM;
266
267 ramht->dev = dev;
268 kref_init(&ramht->refcount);
269 ramht->bits = drm_order(gpuobj->size / 8);
270 INIT_LIST_HEAD(&ramht->entries);
271 spin_lock_init(&ramht->lock);
272 nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);
273
274 *pramht = ramht;
275 return 0;
276}
277
278static void
279nouveau_ramht_del(struct kref *ref)
280{
281 struct nouveau_ramht *ramht =
282 container_of(ref, struct nouveau_ramht, refcount);
283
284 nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
285 kfree(ramht);
286}
287
288void
289nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
290 struct nouveau_channel *chan)
291{
292 struct nouveau_ramht_entry *entry;
293 struct nouveau_ramht *ramht;
294
295 if (ref)
296 kref_get(&ref->refcount);
297
298 ramht = *ptr;
299 if (ramht) {
300 while ((entry = nouveau_ramht_remove_entry(chan, 0))) {
301 nouveau_ramht_remove_hash(chan, entry->handle);
302 nouveau_gpuobj_ref(NULL, &entry->gpuobj);
303 kfree(entry);
304 }
305
306 kref_put(&ramht->refcount, nouveau_ramht_del);
307 }
308 *ptr = ref;
309}
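The RAMHT code deleted above hashed a 32-bit object handle by XOR-folding it in ramht->bits-sized chunks, mixed in the channel id on pre-NV50 parts, scaled the result by the 8-byte entry size, and resolved collisions by linear probing in 8-byte steps with wraparound, returning -ENOMEM once the probe walks back to its starting slot. A condensed standalone sketch of that scheme (a minimal re-statement of the deleted logic, not driver code):

	/* Minimal sketch of the deleted RAMHT hashing: XOR-fold the handle
	 * in 'bits'-sized chunks, mix in the channel id (pre-NV50 only),
	 * then scale by the 8-byte entry size.  Collisions are resolved by
	 * probing forward 8 bytes at a time, wrapping at the table size.
	 */
	static unsigned int
	ramht_hash(unsigned int handle, int bits, int chid, int pre_nv50)
	{
		unsigned int hash = 0;
		int i;

		for (i = 32; i > 0; i -= bits) {
			hash ^= handle & ((1 << bits) - 1);
			handle >>= bits;
		}
		if (pre_nv50)
			hash ^= chid << (bits - 4);
		return hash << 3;	/* byte offset of first candidate slot */
	}

For example, a 4 KiB RAMHT holds 512 eight-byte entries, so nouveau_ramht_new() above computes bits = drm_order(4096 / 8) = 9 and the handle is folded in 9-bit chunks.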
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 9d76a82d3c90..ca5492ac2da5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,11 +1,10 @@
1#include <drm/drmP.h>
2#include "nouveau_drv.h"
3#include <linux/pagemap.h> 1#include <linux/pagemap.h>
4#include <linux/slab.h> 2#include <linux/slab.h>
5 3
6#define NV_CTXDMA_PAGE_SHIFT 12 4#include <subdev/fb.h>
7#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT) 5
8#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) 6#include "nouveau_drm.h"
7#include "nouveau_ttm.h"
9 8
10struct nouveau_sgdma_be { 9struct nouveau_sgdma_be {
11 /* this has to be the first field so populate/unpopulated in 10 /* this has to be the first field so populate/unpopulated in
@@ -13,7 +12,7 @@ struct nouveau_sgdma_be {
13 */ 12 */
14 struct ttm_dma_tt ttm; 13 struct ttm_dma_tt ttm;
15 struct drm_device *dev; 14 struct drm_device *dev;
16 u64 offset; 15 struct nouveau_mem *node;
17}; 16};
18 17
19static void 18static void
@@ -22,7 +21,6 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
22 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; 21 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
23 22
24 if (ttm) { 23 if (ttm) {
25 NV_DEBUG(nvbe->dev, "\n");
26 ttm_dma_tt_fini(&nvbe->ttm); 24 ttm_dma_tt_fini(&nvbe->ttm);
27 kfree(nvbe); 25 kfree(nvbe);
28 } 26 }
@@ -32,25 +30,18 @@ static int
32nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) 30nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
33{ 31{
34 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; 32 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
35 struct drm_device *dev = nvbe->dev; 33 struct nouveau_mem *node = mem->mm_node;
36 struct drm_nouveau_private *dev_priv = dev->dev_private; 34 u64 size = mem->num_pages << 12;
37 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
38 unsigned i, j, pte;
39
40 NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
41
42 nvbe->offset = mem->start << PAGE_SHIFT;
43 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
44 for (i = 0; i < ttm->num_pages; i++) {
45 dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
46 uint32_t offset_l = lower_32_bits(dma_offset);
47 35
48 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { 36 if (ttm->sg) {
49 nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); 37 node->sg = ttm->sg;
50 offset_l += NV_CTXDMA_PAGE_SIZE; 38 nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
51 } 39 } else {
40 node->pages = nvbe->ttm.dma_address;
41 nouveau_vm_map_sg(&node->vma[0], 0, size, node);
52 } 42 }
53 43
44 nvbe->node = node;
54 return 0; 45 return 0;
55} 46}
56 47
@@ -58,22 +49,7 @@ static int
58nv04_sgdma_unbind(struct ttm_tt *ttm) 49nv04_sgdma_unbind(struct ttm_tt *ttm)
59{ 50{
60 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; 51 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
61 struct drm_device *dev = nvbe->dev; 52 nouveau_vm_unmap(&nvbe->node->vma[0]);
62 struct drm_nouveau_private *dev_priv = dev->dev_private;
63 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
64 unsigned i, j, pte;
65
66 NV_DEBUG(dev, "\n");
67
68 if (ttm->state != tt_bound)
69 return 0;
70
71 pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
72 for (i = 0; i < ttm->num_pages; i++) {
73 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
74 nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
75 }
76
77 return 0; 53 return 0;
78} 54}
79 55
@@ -83,206 +59,6 @@ static struct ttm_backend_func nv04_sgdma_backend = {
83 .destroy = nouveau_sgdma_destroy 59 .destroy = nouveau_sgdma_destroy
84}; 60};
85 61
86static void
87nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
88{
89 struct drm_device *dev = nvbe->dev;
90
91 nv_wr32(dev, 0x100810, 0x00000022);
92 if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
93 NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
94 nv_rd32(dev, 0x100810));
95 nv_wr32(dev, 0x100810, 0x00000000);
96}
97
98static int
99nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
100{
101 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
102 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
103 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
104 dma_addr_t *list = nvbe->ttm.dma_address;
105 u32 pte = mem->start << 2;
106 u32 cnt = ttm->num_pages;
107
108 nvbe->offset = mem->start << PAGE_SHIFT;
109
110 while (cnt--) {
111 nv_wo32(pgt, pte, (*list++ >> 7) | 1);
112 pte += 4;
113 }
114
115 nv41_sgdma_flush(nvbe);
116 return 0;
117}
118
119static int
120nv41_sgdma_unbind(struct ttm_tt *ttm)
121{
122 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
123 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
124 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
125 u32 pte = (nvbe->offset >> 12) << 2;
126 u32 cnt = ttm->num_pages;
127
128 while (cnt--) {
129 nv_wo32(pgt, pte, 0x00000000);
130 pte += 4;
131 }
132
133 nv41_sgdma_flush(nvbe);
134 return 0;
135}
136
137static struct ttm_backend_func nv41_sgdma_backend = {
138 .bind = nv41_sgdma_bind,
139 .unbind = nv41_sgdma_unbind,
140 .destroy = nouveau_sgdma_destroy
141};
142
143static void
144nv44_sgdma_flush(struct ttm_tt *ttm)
145{
146 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
147 struct drm_device *dev = nvbe->dev;
148
149 nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
150 nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
151 if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
152 NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
153 nv_rd32(dev, 0x100808));
154 nv_wr32(dev, 0x100808, 0x00000000);
155}
156
157static void
158nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
159{
160 struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
161 dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
162 u32 pte, tmp[4];
163
164 pte = base >> 2;
165 base &= ~0x0000000f;
166
167 tmp[0] = nv_ro32(pgt, base + 0x0);
168 tmp[1] = nv_ro32(pgt, base + 0x4);
169 tmp[2] = nv_ro32(pgt, base + 0x8);
170 tmp[3] = nv_ro32(pgt, base + 0xc);
171 while (cnt--) {
172 u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
173 switch (pte++ & 0x3) {
174 case 0:
175 tmp[0] &= ~0x07ffffff;
176 tmp[0] |= addr;
177 break;
178 case 1:
179 tmp[0] &= ~0xf8000000;
180 tmp[0] |= addr << 27;
181 tmp[1] &= ~0x003fffff;
182 tmp[1] |= addr >> 5;
183 break;
184 case 2:
185 tmp[1] &= ~0xffc00000;
186 tmp[1] |= addr << 22;
187 tmp[2] &= ~0x0001ffff;
188 tmp[2] |= addr >> 10;
189 break;
190 case 3:
191 tmp[2] &= ~0xfffe0000;
192 tmp[2] |= addr << 17;
193 tmp[3] &= ~0x00000fff;
194 tmp[3] |= addr >> 15;
195 break;
196 }
197 }
198
199 tmp[3] |= 0x40000000;
200
201 nv_wo32(pgt, base + 0x0, tmp[0]);
202 nv_wo32(pgt, base + 0x4, tmp[1]);
203 nv_wo32(pgt, base + 0x8, tmp[2]);
204 nv_wo32(pgt, base + 0xc, tmp[3]);
205}
206
207static int
208nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
209{
210 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
211 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
212 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
213 dma_addr_t *list = nvbe->ttm.dma_address;
214 u32 pte = mem->start << 2, tmp[4];
215 u32 cnt = ttm->num_pages;
216 int i;
217
218 nvbe->offset = mem->start << PAGE_SHIFT;
219
220 if (pte & 0x0000000c) {
221 u32 max = 4 - ((pte >> 2) & 0x3);
222 u32 part = (cnt > max) ? max : cnt;
223 nv44_sgdma_fill(pgt, list, pte, part);
224 pte += (part << 2);
225 list += part;
226 cnt -= part;
227 }
228
229 while (cnt >= 4) {
230 for (i = 0; i < 4; i++)
231 tmp[i] = *list++ >> 12;
232 nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
233 nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
234 nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
235 nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
236 pte += 0x10;
237 cnt -= 4;
238 }
239
240 if (cnt)
241 nv44_sgdma_fill(pgt, list, pte, cnt);
242
243 nv44_sgdma_flush(ttm);
244 return 0;
245}
246
247static int
248nv44_sgdma_unbind(struct ttm_tt *ttm)
249{
250 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
251 struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
252 struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
253 u32 pte = (nvbe->offset >> 12) << 2;
254 u32 cnt = ttm->num_pages;
255
256 if (pte & 0x0000000c) {
257 u32 max = 4 - ((pte >> 2) & 0x3);
258 u32 part = (cnt > max) ? max : cnt;
259 nv44_sgdma_fill(pgt, NULL, pte, part);
260 pte += (part << 2);
261 cnt -= part;
262 }
263
264 while (cnt >= 4) {
265 nv_wo32(pgt, pte + 0x0, 0x00000000);
266 nv_wo32(pgt, pte + 0x4, 0x00000000);
267 nv_wo32(pgt, pte + 0x8, 0x00000000);
268 nv_wo32(pgt, pte + 0xc, 0x00000000);
269 pte += 0x10;
270 cnt -= 4;
271 }
272
273 if (cnt)
274 nv44_sgdma_fill(pgt, NULL, pte, cnt);
275
276 nv44_sgdma_flush(ttm);
277 return 0;
278}
279
280static struct ttm_backend_func nv44_sgdma_backend = {
281 .bind = nv44_sgdma_bind,
282 .unbind = nv44_sgdma_unbind,
283 .destroy = nouveau_sgdma_destroy
284};
285
286static int 62static int
287nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) 63nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
288{ 64{
@@ -315,16 +91,18 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
315 unsigned long size, uint32_t page_flags, 91 unsigned long size, uint32_t page_flags,
316 struct page *dummy_read_page) 92 struct page *dummy_read_page)
317{ 93{
318 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); 94 struct nouveau_drm *drm = nouveau_bdev(bdev);
319 struct drm_device *dev = dev_priv->dev;
320 struct nouveau_sgdma_be *nvbe; 95 struct nouveau_sgdma_be *nvbe;
321 96
322 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); 97 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
323 if (!nvbe) 98 if (!nvbe)
324 return NULL; 99 return NULL;
325 100
326 nvbe->dev = dev; 101 nvbe->dev = drm->dev;
327 nvbe->ttm.ttm.func = dev_priv->gart_info.func; 102 if (nv_device(drm->device)->card_type < NV_50)
103 nvbe->ttm.ttm.func = &nv04_sgdma_backend;
104 else
105 nvbe->ttm.ttm.func = &nv50_sgdma_backend;
328 106
329 if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) { 107 if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
330 kfree(nvbe); 108 kfree(nvbe);
@@ -332,116 +110,3 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
332 } 110 }
333 return &nvbe->ttm.ttm; 111 return &nvbe->ttm.ttm;
334} 112}
335
336int
337nouveau_sgdma_init(struct drm_device *dev)
338{
339 struct drm_nouveau_private *dev_priv = dev->dev_private;
340 struct nouveau_gpuobj *gpuobj = NULL;
341 u32 aper_size, align;
342 int ret;
343
344 if (dev_priv->card_type >= NV_40)
345 aper_size = 512 * 1024 * 1024;
346 else
347 aper_size = 128 * 1024 * 1024;
348
349 /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
350 * christmas. The cards before it have them, the cards after
351 * it have them, why is NV44 so unloved?
352 */
353 dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
354 if (!dev_priv->gart_info.dummy.page)
355 return -ENOMEM;
356
357 dev_priv->gart_info.dummy.addr =
358 pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
359 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
360 if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
361 NV_ERROR(dev, "error mapping dummy page\n");
362 __free_page(dev_priv->gart_info.dummy.page);
363 dev_priv->gart_info.dummy.page = NULL;
364 return -ENOMEM;
365 }
366
367 if (dev_priv->card_type >= NV_50) {
368 dev_priv->gart_info.aper_base = 0;
369 dev_priv->gart_info.aper_size = aper_size;
370 dev_priv->gart_info.type = NOUVEAU_GART_HW;
371 dev_priv->gart_info.func = &nv50_sgdma_backend;
372 } else
373 if (0 && pci_is_pcie(dev->pdev) &&
374 dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
375 if (nv44_graph_class(dev)) {
376 dev_priv->gart_info.func = &nv44_sgdma_backend;
377 align = 512 * 1024;
378 } else {
379 dev_priv->gart_info.func = &nv41_sgdma_backend;
380 align = 16;
381 }
382
383 ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
384 NVOBJ_FLAG_ZERO_ALLOC |
385 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
386 if (ret) {
387 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
388 return ret;
389 }
390
391 dev_priv->gart_info.sg_ctxdma = gpuobj;
392 dev_priv->gart_info.aper_base = 0;
393 dev_priv->gart_info.aper_size = aper_size;
394 dev_priv->gart_info.type = NOUVEAU_GART_HW;
395 } else {
396 ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
397 NVOBJ_FLAG_ZERO_ALLOC |
398 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
399 if (ret) {
400 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
401 return ret;
402 }
403
404 nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
405 (1 << 12) /* PT present */ |
406 (0 << 13) /* PT *not* linear */ |
407 (0 << 14) /* RW */ |
408 (2 << 16) /* PCI */);
409 nv_wo32(gpuobj, 4, aper_size - 1);
410
411 dev_priv->gart_info.sg_ctxdma = gpuobj;
412 dev_priv->gart_info.aper_base = 0;
413 dev_priv->gart_info.aper_size = aper_size;
414 dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
415 dev_priv->gart_info.func = &nv04_sgdma_backend;
416 }
417
418 return 0;
419}
420
421void
422nouveau_sgdma_takedown(struct drm_device *dev)
423{
424 struct drm_nouveau_private *dev_priv = dev->dev_private;
425
426 nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
427
428 if (dev_priv->gart_info.dummy.page) {
429 pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
430 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
431 __free_page(dev_priv->gart_info.dummy.page);
432 dev_priv->gart_info.dummy.page = NULL;
433 }
434}
435
436uint32_t
437nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
438{
439 struct drm_nouveau_private *dev_priv = dev->dev_private;
440 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
441 int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
442
443 BUG_ON(dev_priv->card_type >= NV_50);
444
445 return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
446 (offset & NV_CTXDMA_PAGE_MASK);
447}
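Of the backends removed from nouveau_sgdma.c above, only NV44 had to bit-pack its page table: each PTE is a 27-bit page frame number (dma_addr >> 12), and four consecutive PTEs share one 16-byte group, which is why nv44_sgdma_fill() read-modify-wrote four words and why the bind path treated unaligned head and tail runs separately. The hedged sketch below shows just the packing arithmetic for one fully aligned group of four (it overwrites the words outright, unlike the deleted RMW helper; addr[] is assumed to already hold 27-bit page frame numbers):

	/* Sketch of the NV44 PTE packing removed above.  Bit layout per the
	 * old nv44_sgdma_fill()/nv44_sgdma_bind() fast path:
	 *   addr0 -> w0[26:0]
	 *   addr1 -> w0[31:27] + w1[21:0]
	 *   addr2 -> w1[31:22] + w2[16:0]
	 *   addr3 -> w2[31:17] + w3[11:0]
	 * and w3 bit 30 marks the group valid.
	 */
	static void
	nv44_pack_group(unsigned int w[4], const unsigned int addr[4])
	{
		w[0] = addr[0] | (addr[1] << 27);
		w[1] = (addr[1] >> 5) | (addr[2] << 22);
		w[2] = (addr[2] >> 10) | (addr[3] << 17);
		w[3] = (addr[3] >> 15) | 0x40000000;
	}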
diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h
deleted file mode 100644
index 709e5ac680ec..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_software.h
+++ /dev/null
@@ -1,56 +0,0 @@
1#ifndef __NOUVEAU_SOFTWARE_H__
2#define __NOUVEAU_SOFTWARE_H__
3
4struct nouveau_software_priv {
5 struct nouveau_exec_engine base;
6 struct list_head vblank;
7 spinlock_t peephole_lock;
8};
9
10struct nouveau_software_chan {
11 struct list_head flip;
12 struct {
13 struct list_head list;
14 u32 channel;
15 u32 ctxdma;
16 u32 offset;
17 u32 value;
18 u32 head;
19 } vblank;
20};
21
22static inline void
23nouveau_software_context_new(struct nouveau_software_chan *pch)
24{
25 INIT_LIST_HEAD(&pch->flip);
26 INIT_LIST_HEAD(&pch->vblank.list);
27}
28
29static inline void
30nouveau_software_create(struct nouveau_software_priv *psw)
31{
32 INIT_LIST_HEAD(&psw->vblank);
33 spin_lock_init(&psw->peephole_lock);
34}
35
36static inline u16
37nouveau_software_class(struct drm_device *dev)
38{
39 struct drm_nouveau_private *dev_priv = dev->dev_private;
40 if (dev_priv->card_type <= NV_04)
41 return 0x006e;
42 if (dev_priv->card_type <= NV_40)
43 return 0x016e;
44 if (dev_priv->card_type <= NV_50)
45 return 0x506e;
46 if (dev_priv->card_type <= NV_E0)
47 return 0x906e;
48 return 0x0000;
49}
50
51int nv04_software_create(struct drm_device *);
52int nv50_software_create(struct drm_device *);
53int nvc0_software_create(struct drm_device *);
54u64 nvc0_software_crtc(struct nouveau_channel *, int crtc);
55
56#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
deleted file mode 100644
index 30fe9291d17e..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ /dev/null
@@ -1,1304 +0,0 @@
1/*
2 * Copyright 2005 Stephane Marchesin
3 * Copyright 2008 Stuart Bennett
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26#include <linux/swab.h>
27#include <linux/slab.h>
28#include <drm/drmP.h>
29#include <drm/drm_crtc_helper.h>
30#include <linux/vgaarb.h>
31#include <linux/vga_switcheroo.h>
32
33#include "nouveau_drv.h"
34#include <drm/nouveau_drm.h>
35#include "nouveau_fbcon.h"
36#include "nouveau_ramht.h"
37#include "nouveau_gpio.h"
38#include "nouveau_pm.h"
39#include "nv50_display.h"
40#include "nouveau_fifo.h"
41#include "nouveau_fence.h"
42#include "nouveau_software.h"
43
44static void nouveau_stub_takedown(struct drm_device *dev) {}
45static int nouveau_stub_init(struct drm_device *dev) { return 0; }
46
47static int nouveau_init_engine_ptrs(struct drm_device *dev)
48{
49 struct drm_nouveau_private *dev_priv = dev->dev_private;
50 struct nouveau_engine *engine = &dev_priv->engine;
51
52 switch (dev_priv->chipset & 0xf0) {
53 case 0x00:
54 engine->instmem.init = nv04_instmem_init;
55 engine->instmem.takedown = nv04_instmem_takedown;
56 engine->instmem.suspend = nv04_instmem_suspend;
57 engine->instmem.resume = nv04_instmem_resume;
58 engine->instmem.get = nv04_instmem_get;
59 engine->instmem.put = nv04_instmem_put;
60 engine->instmem.map = nv04_instmem_map;
61 engine->instmem.unmap = nv04_instmem_unmap;
62 engine->instmem.flush = nv04_instmem_flush;
63 engine->mc.init = nv04_mc_init;
64 engine->mc.takedown = nv04_mc_takedown;
65 engine->timer.init = nv04_timer_init;
66 engine->timer.read = nv04_timer_read;
67 engine->timer.takedown = nv04_timer_takedown;
68 engine->fb.init = nv04_fb_init;
69 engine->fb.takedown = nv04_fb_takedown;
70 engine->display.early_init = nv04_display_early_init;
71 engine->display.late_takedown = nv04_display_late_takedown;
72 engine->display.create = nv04_display_create;
73 engine->display.destroy = nv04_display_destroy;
74 engine->display.init = nv04_display_init;
75 engine->display.fini = nv04_display_fini;
76 engine->pm.clocks_get = nv04_pm_clocks_get;
77 engine->pm.clocks_pre = nv04_pm_clocks_pre;
78 engine->pm.clocks_set = nv04_pm_clocks_set;
79 engine->vram.init = nv04_fb_vram_init;
80 engine->vram.takedown = nouveau_stub_takedown;
81 engine->vram.flags_valid = nouveau_mem_flags_valid;
82 break;
83 case 0x10:
84 engine->instmem.init = nv04_instmem_init;
85 engine->instmem.takedown = nv04_instmem_takedown;
86 engine->instmem.suspend = nv04_instmem_suspend;
87 engine->instmem.resume = nv04_instmem_resume;
88 engine->instmem.get = nv04_instmem_get;
89 engine->instmem.put = nv04_instmem_put;
90 engine->instmem.map = nv04_instmem_map;
91 engine->instmem.unmap = nv04_instmem_unmap;
92 engine->instmem.flush = nv04_instmem_flush;
93 engine->mc.init = nv04_mc_init;
94 engine->mc.takedown = nv04_mc_takedown;
95 engine->timer.init = nv04_timer_init;
96 engine->timer.read = nv04_timer_read;
97 engine->timer.takedown = nv04_timer_takedown;
98 engine->fb.init = nv10_fb_init;
99 engine->fb.takedown = nv10_fb_takedown;
100 engine->fb.init_tile_region = nv10_fb_init_tile_region;
101 engine->fb.set_tile_region = nv10_fb_set_tile_region;
102 engine->fb.free_tile_region = nv10_fb_free_tile_region;
103 engine->display.early_init = nv04_display_early_init;
104 engine->display.late_takedown = nv04_display_late_takedown;
105 engine->display.create = nv04_display_create;
106 engine->display.destroy = nv04_display_destroy;
107 engine->display.init = nv04_display_init;
108 engine->display.fini = nv04_display_fini;
109 engine->gpio.drive = nv10_gpio_drive;
110 engine->gpio.sense = nv10_gpio_sense;
111 engine->pm.clocks_get = nv04_pm_clocks_get;
112 engine->pm.clocks_pre = nv04_pm_clocks_pre;
113 engine->pm.clocks_set = nv04_pm_clocks_set;
114 if (dev_priv->chipset == 0x1a ||
115 dev_priv->chipset == 0x1f)
116 engine->vram.init = nv1a_fb_vram_init;
117 else
118 engine->vram.init = nv10_fb_vram_init;
119 engine->vram.takedown = nouveau_stub_takedown;
120 engine->vram.flags_valid = nouveau_mem_flags_valid;
121 break;
122 case 0x20:
123 engine->instmem.init = nv04_instmem_init;
124 engine->instmem.takedown = nv04_instmem_takedown;
125 engine->instmem.suspend = nv04_instmem_suspend;
126 engine->instmem.resume = nv04_instmem_resume;
127 engine->instmem.get = nv04_instmem_get;
128 engine->instmem.put = nv04_instmem_put;
129 engine->instmem.map = nv04_instmem_map;
130 engine->instmem.unmap = nv04_instmem_unmap;
131 engine->instmem.flush = nv04_instmem_flush;
132 engine->mc.init = nv04_mc_init;
133 engine->mc.takedown = nv04_mc_takedown;
134 engine->timer.init = nv04_timer_init;
135 engine->timer.read = nv04_timer_read;
136 engine->timer.takedown = nv04_timer_takedown;
137 engine->fb.init = nv20_fb_init;
138 engine->fb.takedown = nv20_fb_takedown;
139 engine->fb.init_tile_region = nv20_fb_init_tile_region;
140 engine->fb.set_tile_region = nv20_fb_set_tile_region;
141 engine->fb.free_tile_region = nv20_fb_free_tile_region;
142 engine->display.early_init = nv04_display_early_init;
143 engine->display.late_takedown = nv04_display_late_takedown;
144 engine->display.create = nv04_display_create;
145 engine->display.destroy = nv04_display_destroy;
146 engine->display.init = nv04_display_init;
147 engine->display.fini = nv04_display_fini;
148 engine->gpio.drive = nv10_gpio_drive;
149 engine->gpio.sense = nv10_gpio_sense;
150 engine->pm.clocks_get = nv04_pm_clocks_get;
151 engine->pm.clocks_pre = nv04_pm_clocks_pre;
152 engine->pm.clocks_set = nv04_pm_clocks_set;
153 engine->vram.init = nv20_fb_vram_init;
154 engine->vram.takedown = nouveau_stub_takedown;
155 engine->vram.flags_valid = nouveau_mem_flags_valid;
156 break;
157 case 0x30:
158 engine->instmem.init = nv04_instmem_init;
159 engine->instmem.takedown = nv04_instmem_takedown;
160 engine->instmem.suspend = nv04_instmem_suspend;
161 engine->instmem.resume = nv04_instmem_resume;
162 engine->instmem.get = nv04_instmem_get;
163 engine->instmem.put = nv04_instmem_put;
164 engine->instmem.map = nv04_instmem_map;
165 engine->instmem.unmap = nv04_instmem_unmap;
166 engine->instmem.flush = nv04_instmem_flush;
167 engine->mc.init = nv04_mc_init;
168 engine->mc.takedown = nv04_mc_takedown;
169 engine->timer.init = nv04_timer_init;
170 engine->timer.read = nv04_timer_read;
171 engine->timer.takedown = nv04_timer_takedown;
172 engine->fb.init = nv30_fb_init;
173 engine->fb.takedown = nv30_fb_takedown;
174 engine->fb.init_tile_region = nv30_fb_init_tile_region;
175 engine->fb.set_tile_region = nv10_fb_set_tile_region;
176 engine->fb.free_tile_region = nv30_fb_free_tile_region;
177 engine->display.early_init = nv04_display_early_init;
178 engine->display.late_takedown = nv04_display_late_takedown;
179 engine->display.create = nv04_display_create;
180 engine->display.destroy = nv04_display_destroy;
181 engine->display.init = nv04_display_init;
182 engine->display.fini = nv04_display_fini;
183 engine->gpio.drive = nv10_gpio_drive;
184 engine->gpio.sense = nv10_gpio_sense;
185 engine->pm.clocks_get = nv04_pm_clocks_get;
186 engine->pm.clocks_pre = nv04_pm_clocks_pre;
187 engine->pm.clocks_set = nv04_pm_clocks_set;
188 engine->pm.voltage_get = nouveau_voltage_gpio_get;
189 engine->pm.voltage_set = nouveau_voltage_gpio_set;
190 engine->vram.init = nv20_fb_vram_init;
191 engine->vram.takedown = nouveau_stub_takedown;
192 engine->vram.flags_valid = nouveau_mem_flags_valid;
193 break;
194 case 0x40:
195 case 0x60:
196 engine->instmem.init = nv04_instmem_init;
197 engine->instmem.takedown = nv04_instmem_takedown;
198 engine->instmem.suspend = nv04_instmem_suspend;
199 engine->instmem.resume = nv04_instmem_resume;
200 engine->instmem.get = nv04_instmem_get;
201 engine->instmem.put = nv04_instmem_put;
202 engine->instmem.map = nv04_instmem_map;
203 engine->instmem.unmap = nv04_instmem_unmap;
204 engine->instmem.flush = nv04_instmem_flush;
205 engine->mc.init = nv40_mc_init;
206 engine->mc.takedown = nv40_mc_takedown;
207 engine->timer.init = nv04_timer_init;
208 engine->timer.read = nv04_timer_read;
209 engine->timer.takedown = nv04_timer_takedown;
210 engine->fb.init = nv40_fb_init;
211 engine->fb.takedown = nv40_fb_takedown;
212 engine->fb.init_tile_region = nv30_fb_init_tile_region;
213 engine->fb.set_tile_region = nv40_fb_set_tile_region;
214 engine->fb.free_tile_region = nv30_fb_free_tile_region;
215 engine->display.early_init = nv04_display_early_init;
216 engine->display.late_takedown = nv04_display_late_takedown;
217 engine->display.create = nv04_display_create;
218 engine->display.destroy = nv04_display_destroy;
219 engine->display.init = nv04_display_init;
220 engine->display.fini = nv04_display_fini;
221 engine->gpio.init = nv10_gpio_init;
222 engine->gpio.fini = nv10_gpio_fini;
223 engine->gpio.drive = nv10_gpio_drive;
224 engine->gpio.sense = nv10_gpio_sense;
225 engine->gpio.irq_enable = nv10_gpio_irq_enable;
226 engine->pm.clocks_get = nv40_pm_clocks_get;
227 engine->pm.clocks_pre = nv40_pm_clocks_pre;
228 engine->pm.clocks_set = nv40_pm_clocks_set;
229 engine->pm.voltage_get = nouveau_voltage_gpio_get;
230 engine->pm.voltage_set = nouveau_voltage_gpio_set;
231 engine->pm.temp_get = nv40_temp_get;
232 engine->pm.pwm_get = nv40_pm_pwm_get;
233 engine->pm.pwm_set = nv40_pm_pwm_set;
234 engine->vram.init = nv40_fb_vram_init;
235 engine->vram.takedown = nouveau_stub_takedown;
236 engine->vram.flags_valid = nouveau_mem_flags_valid;
237 break;
238 case 0x50:
239 case 0x80: /* gotta love NVIDIA's consistency.. */
240 case 0x90:
241 case 0xa0:
242 engine->instmem.init = nv50_instmem_init;
243 engine->instmem.takedown = nv50_instmem_takedown;
244 engine->instmem.suspend = nv50_instmem_suspend;
245 engine->instmem.resume = nv50_instmem_resume;
246 engine->instmem.get = nv50_instmem_get;
247 engine->instmem.put = nv50_instmem_put;
248 engine->instmem.map = nv50_instmem_map;
249 engine->instmem.unmap = nv50_instmem_unmap;
250 if (dev_priv->chipset == 0x50)
251 engine->instmem.flush = nv50_instmem_flush;
252 else
253 engine->instmem.flush = nv84_instmem_flush;
254 engine->mc.init = nv50_mc_init;
255 engine->mc.takedown = nv50_mc_takedown;
256 engine->timer.init = nv04_timer_init;
257 engine->timer.read = nv04_timer_read;
258 engine->timer.takedown = nv04_timer_takedown;
259 engine->fb.init = nv50_fb_init;
260 engine->fb.takedown = nv50_fb_takedown;
261 engine->display.early_init = nv50_display_early_init;
262 engine->display.late_takedown = nv50_display_late_takedown;
263 engine->display.create = nv50_display_create;
264 engine->display.destroy = nv50_display_destroy;
265 engine->display.init = nv50_display_init;
266 engine->display.fini = nv50_display_fini;
267 engine->gpio.init = nv50_gpio_init;
268 engine->gpio.fini = nv50_gpio_fini;
269 engine->gpio.drive = nv50_gpio_drive;
270 engine->gpio.sense = nv50_gpio_sense;
271 engine->gpio.irq_enable = nv50_gpio_irq_enable;
272 switch (dev_priv->chipset) {
273 case 0x84:
274 case 0x86:
275 case 0x92:
276 case 0x94:
277 case 0x96:
278 case 0x98:
279 case 0xa0:
280 case 0xaa:
281 case 0xac:
282 case 0x50:
283 engine->pm.clocks_get = nv50_pm_clocks_get;
284 engine->pm.clocks_pre = nv50_pm_clocks_pre;
285 engine->pm.clocks_set = nv50_pm_clocks_set;
286 break;
287 default:
288 engine->pm.clocks_get = nva3_pm_clocks_get;
289 engine->pm.clocks_pre = nva3_pm_clocks_pre;
290 engine->pm.clocks_set = nva3_pm_clocks_set;
291 break;
292 }
293 engine->pm.voltage_get = nouveau_voltage_gpio_get;
294 engine->pm.voltage_set = nouveau_voltage_gpio_set;
295 if (dev_priv->chipset >= 0x84)
296 engine->pm.temp_get = nv84_temp_get;
297 else
298 engine->pm.temp_get = nv40_temp_get;
299 engine->pm.pwm_get = nv50_pm_pwm_get;
300 engine->pm.pwm_set = nv50_pm_pwm_set;
301 engine->vram.init = nv50_vram_init;
302 engine->vram.takedown = nv50_vram_fini;
303 engine->vram.get = nv50_vram_new;
304 engine->vram.put = nv50_vram_del;
305 engine->vram.flags_valid = nv50_vram_flags_valid;
306 break;
307 case 0xc0:
308 engine->instmem.init = nvc0_instmem_init;
309 engine->instmem.takedown = nvc0_instmem_takedown;
310 engine->instmem.suspend = nvc0_instmem_suspend;
311 engine->instmem.resume = nvc0_instmem_resume;
312 engine->instmem.get = nv50_instmem_get;
313 engine->instmem.put = nv50_instmem_put;
314 engine->instmem.map = nv50_instmem_map;
315 engine->instmem.unmap = nv50_instmem_unmap;
316 engine->instmem.flush = nv84_instmem_flush;
317 engine->mc.init = nv50_mc_init;
318 engine->mc.takedown = nv50_mc_takedown;
319 engine->timer.init = nv04_timer_init;
320 engine->timer.read = nv04_timer_read;
321 engine->timer.takedown = nv04_timer_takedown;
322 engine->fb.init = nvc0_fb_init;
323 engine->fb.takedown = nvc0_fb_takedown;
324 engine->display.early_init = nv50_display_early_init;
325 engine->display.late_takedown = nv50_display_late_takedown;
326 engine->display.create = nv50_display_create;
327 engine->display.destroy = nv50_display_destroy;
328 engine->display.init = nv50_display_init;
329 engine->display.fini = nv50_display_fini;
330 engine->gpio.init = nv50_gpio_init;
331 engine->gpio.fini = nv50_gpio_fini;
332 engine->gpio.drive = nv50_gpio_drive;
333 engine->gpio.sense = nv50_gpio_sense;
334 engine->gpio.irq_enable = nv50_gpio_irq_enable;
335 engine->vram.init = nvc0_vram_init;
336 engine->vram.takedown = nv50_vram_fini;
337 engine->vram.get = nvc0_vram_new;
338 engine->vram.put = nv50_vram_del;
339 engine->vram.flags_valid = nvc0_vram_flags_valid;
340 engine->pm.temp_get = nv84_temp_get;
341 engine->pm.clocks_get = nvc0_pm_clocks_get;
342 engine->pm.clocks_pre = nvc0_pm_clocks_pre;
343 engine->pm.clocks_set = nvc0_pm_clocks_set;
344 engine->pm.voltage_get = nouveau_voltage_gpio_get;
345 engine->pm.voltage_set = nouveau_voltage_gpio_set;
346 engine->pm.pwm_get = nv50_pm_pwm_get;
347 engine->pm.pwm_set = nv50_pm_pwm_set;
348 break;
349 case 0xd0:
350 engine->instmem.init = nvc0_instmem_init;
351 engine->instmem.takedown = nvc0_instmem_takedown;
352 engine->instmem.suspend = nvc0_instmem_suspend;
353 engine->instmem.resume = nvc0_instmem_resume;
354 engine->instmem.get = nv50_instmem_get;
355 engine->instmem.put = nv50_instmem_put;
356 engine->instmem.map = nv50_instmem_map;
357 engine->instmem.unmap = nv50_instmem_unmap;
358 engine->instmem.flush = nv84_instmem_flush;
359 engine->mc.init = nv50_mc_init;
360 engine->mc.takedown = nv50_mc_takedown;
361 engine->timer.init = nv04_timer_init;
362 engine->timer.read = nv04_timer_read;
363 engine->timer.takedown = nv04_timer_takedown;
364 engine->fb.init = nvc0_fb_init;
365 engine->fb.takedown = nvc0_fb_takedown;
366 engine->display.early_init = nouveau_stub_init;
367 engine->display.late_takedown = nouveau_stub_takedown;
368 engine->display.create = nvd0_display_create;
369 engine->display.destroy = nvd0_display_destroy;
370 engine->display.init = nvd0_display_init;
371 engine->display.fini = nvd0_display_fini;
372 engine->gpio.init = nv50_gpio_init;
373 engine->gpio.fini = nv50_gpio_fini;
374 engine->gpio.drive = nvd0_gpio_drive;
375 engine->gpio.sense = nvd0_gpio_sense;
376 engine->gpio.irq_enable = nv50_gpio_irq_enable;
377 engine->vram.init = nvc0_vram_init;
378 engine->vram.takedown = nv50_vram_fini;
379 engine->vram.get = nvc0_vram_new;
380 engine->vram.put = nv50_vram_del;
381 engine->vram.flags_valid = nvc0_vram_flags_valid;
382 engine->pm.temp_get = nv84_temp_get;
383 engine->pm.clocks_get = nvc0_pm_clocks_get;
384 engine->pm.clocks_pre = nvc0_pm_clocks_pre;
385 engine->pm.clocks_set = nvc0_pm_clocks_set;
386 engine->pm.voltage_get = nouveau_voltage_gpio_get;
387 engine->pm.voltage_set = nouveau_voltage_gpio_set;
388 break;
389 case 0xe0:
390 engine->instmem.init = nvc0_instmem_init;
391 engine->instmem.takedown = nvc0_instmem_takedown;
392 engine->instmem.suspend = nvc0_instmem_suspend;
393 engine->instmem.resume = nvc0_instmem_resume;
394 engine->instmem.get = nv50_instmem_get;
395 engine->instmem.put = nv50_instmem_put;
396 engine->instmem.map = nv50_instmem_map;
397 engine->instmem.unmap = nv50_instmem_unmap;
398 engine->instmem.flush = nv84_instmem_flush;
399 engine->mc.init = nv50_mc_init;
400 engine->mc.takedown = nv50_mc_takedown;
401 engine->timer.init = nv04_timer_init;
402 engine->timer.read = nv04_timer_read;
403 engine->timer.takedown = nv04_timer_takedown;
404 engine->fb.init = nvc0_fb_init;
405 engine->fb.takedown = nvc0_fb_takedown;
406 engine->display.early_init = nouveau_stub_init;
407 engine->display.late_takedown = nouveau_stub_takedown;
408 engine->display.create = nvd0_display_create;
409 engine->display.destroy = nvd0_display_destroy;
410 engine->display.init = nvd0_display_init;
411 engine->display.fini = nvd0_display_fini;
412 engine->gpio.init = nv50_gpio_init;
413 engine->gpio.fini = nv50_gpio_fini;
414 engine->gpio.drive = nvd0_gpio_drive;
415 engine->gpio.sense = nvd0_gpio_sense;
416 engine->gpio.irq_enable = nv50_gpio_irq_enable;
417 engine->vram.init = nvc0_vram_init;
418 engine->vram.takedown = nv50_vram_fini;
419 engine->vram.get = nvc0_vram_new;
420 engine->vram.put = nv50_vram_del;
421 engine->vram.flags_valid = nvc0_vram_flags_valid;
422 break;
423 default:
424 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
425 return 1;
426 }
427
428 /* headless mode */
429 if (nouveau_modeset == 2) {
430 engine->display.early_init = nouveau_stub_init;
431 engine->display.late_takedown = nouveau_stub_takedown;
432 engine->display.create = nouveau_stub_init;
433 engine->display.init = nouveau_stub_init;
434 engine->display.destroy = nouveau_stub_takedown;
435 }
436
437 return 0;
438}
439
440static unsigned int
441nouveau_vga_set_decode(void *priv, bool state)
442{
443 struct drm_device *dev = priv;
444 struct drm_nouveau_private *dev_priv = dev->dev_private;
445
446 if (dev_priv->chipset >= 0x40)
447 nv_wr32(dev, 0x88054, state);
448 else
449 nv_wr32(dev, 0x1854, state);
450
451 if (state)
452 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
453 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
454 else
455 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
456}
457
458static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
459 enum vga_switcheroo_state state)
460{
461 struct drm_device *dev = pci_get_drvdata(pdev);
462 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
463 if (state == VGA_SWITCHEROO_ON) {
464 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
465 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
466 nouveau_pci_resume(pdev);
467 drm_kms_helper_poll_enable(dev);
468 dev->switch_power_state = DRM_SWITCH_POWER_ON;
469 } else {
470 printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
471 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
472 drm_kms_helper_poll_disable(dev);
473 nouveau_switcheroo_optimus_dsm();
474 nouveau_pci_suspend(pdev, pmm);
475 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
476 }
477}
478
479static void nouveau_switcheroo_reprobe(struct pci_dev *pdev)
480{
481 struct drm_device *dev = pci_get_drvdata(pdev);
482 nouveau_fbcon_output_poll_changed(dev);
483}
484
485static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
486{
487 struct drm_device *dev = pci_get_drvdata(pdev);
488 bool can_switch;
489
490 spin_lock(&dev->count_lock);
491 can_switch = (dev->open_count == 0);
492 spin_unlock(&dev->count_lock);
493 return can_switch;
494}
495
496static void
497nouveau_card_channel_fini(struct drm_device *dev)
498{
499 struct drm_nouveau_private *dev_priv = dev->dev_private;
500
501 if (dev_priv->channel)
502 nouveau_channel_put_unlocked(&dev_priv->channel);
503}
504
505static int
506nouveau_card_channel_init(struct drm_device *dev)
507{
508 struct drm_nouveau_private *dev_priv = dev->dev_private;
509 struct nouveau_channel *chan;
510 int ret;
511
512 ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
513 dev_priv->channel = chan;
514 if (ret)
515 return ret;
516 mutex_unlock(&dev_priv->channel->mutex);
517
518 nouveau_bo_move_init(chan);
519 return 0;
520}
521
522static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
523 .set_gpu_state = nouveau_switcheroo_set_state,
524 .reprobe = nouveau_switcheroo_reprobe,
525 .can_switch = nouveau_switcheroo_can_switch,
526};
527
528int
529nouveau_card_init(struct drm_device *dev)
530{
531 struct drm_nouveau_private *dev_priv = dev->dev_private;
532 struct nouveau_engine *engine;
533 int ret, e = 0;
534
535 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
536 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
537
538 /* Initialise internal driver API hooks */
539 ret = nouveau_init_engine_ptrs(dev);
540 if (ret)
541 goto out;
542 engine = &dev_priv->engine;
543 spin_lock_init(&dev_priv->channels.lock);
544 spin_lock_init(&dev_priv->tile.lock);
545 spin_lock_init(&dev_priv->context_switch_lock);
546 spin_lock_init(&dev_priv->vm_lock);
547
548 /* Make the CRTCs and I2C buses accessible */
549 ret = engine->display.early_init(dev);
550 if (ret)
551 goto out;
552
553 /* Parse BIOS tables / Run init tables if card not POSTed */
554 ret = nouveau_bios_init(dev);
555 if (ret)
556 goto out_display_early;
557
558 /* workaround an odd issue on nvc1 by disabling the device's
559 * nosnoop capability. hopefully won't cause issues until a
560 * better fix is found - assuming there is one...
561 */
562 if (dev_priv->chipset == 0xc1) {
563 nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
564 }
565
566 /* PMC */
567 ret = engine->mc.init(dev);
568 if (ret)
569 goto out_bios;
570
571 /* PTIMER */
572 ret = engine->timer.init(dev);
573 if (ret)
574 goto out_mc;
575
576 /* PFB */
577 ret = engine->fb.init(dev);
578 if (ret)
579 goto out_timer;
580
581 ret = engine->vram.init(dev);
582 if (ret)
583 goto out_fb;
584
585 /* PGPIO */
586 ret = nouveau_gpio_create(dev);
587 if (ret)
588 goto out_vram;
589
590 ret = nouveau_gpuobj_init(dev);
591 if (ret)
592 goto out_gpio;
593
594 ret = engine->instmem.init(dev);
595 if (ret)
596 goto out_gpuobj;
597
598 ret = nouveau_mem_vram_init(dev);
599 if (ret)
600 goto out_instmem;
601
602 ret = nouveau_mem_gart_init(dev);
603 if (ret)
604 goto out_ttmvram;
605
606 if (!dev_priv->noaccel) {
607 switch (dev_priv->card_type) {
608 case NV_04:
609 nv04_fifo_create(dev);
610 break;
611 case NV_10:
612 case NV_20:
613 case NV_30:
614 if (dev_priv->chipset < 0x17)
615 nv10_fifo_create(dev);
616 else
617 nv17_fifo_create(dev);
618 break;
619 case NV_40:
620 nv40_fifo_create(dev);
621 break;
622 case NV_50:
623 if (dev_priv->chipset == 0x50)
624 nv50_fifo_create(dev);
625 else
626 nv84_fifo_create(dev);
627 break;
628 case NV_C0:
629 case NV_D0:
630 nvc0_fifo_create(dev);
631 break;
632 case NV_E0:
633 nve0_fifo_create(dev);
634 break;
635 default:
636 break;
637 }
638
639 switch (dev_priv->card_type) {
640 case NV_04:
641 nv04_fence_create(dev);
642 break;
643 case NV_10:
644 case NV_20:
645 case NV_30:
646 case NV_40:
647 case NV_50:
648 if (dev_priv->chipset < 0x84)
649 nv10_fence_create(dev);
650 else
651 nv84_fence_create(dev);
652 break;
653 case NV_C0:
654 case NV_D0:
655 case NV_E0:
656 nvc0_fence_create(dev);
657 break;
658 default:
659 break;
660 }
661
662 switch (dev_priv->card_type) {
663 case NV_04:
664 case NV_10:
665 case NV_20:
666 case NV_30:
667 case NV_40:
668 nv04_software_create(dev);
669 break;
670 case NV_50:
671 nv50_software_create(dev);
672 break;
673 case NV_C0:
674 case NV_D0:
675 case NV_E0:
676 nvc0_software_create(dev);
677 break;
678 default:
679 break;
680 }
681
682 switch (dev_priv->card_type) {
683 case NV_04:
684 nv04_graph_create(dev);
685 break;
686 case NV_10:
687 nv10_graph_create(dev);
688 break;
689 case NV_20:
690 case NV_30:
691 nv20_graph_create(dev);
692 break;
693 case NV_40:
694 nv40_graph_create(dev);
695 break;
696 case NV_50:
697 nv50_graph_create(dev);
698 break;
699 case NV_C0:
700 case NV_D0:
701 nvc0_graph_create(dev);
702 break;
703 case NV_E0:
704 nve0_graph_create(dev);
705 break;
706 default:
707 break;
708 }
709
710 switch (dev_priv->chipset) {
711 case 0x84:
712 case 0x86:
713 case 0x92:
714 case 0x94:
715 case 0x96:
716 case 0xa0:
717 nv84_crypt_create(dev);
718 break;
719 case 0x98:
720 case 0xaa:
721 case 0xac:
722 nv98_crypt_create(dev);
723 break;
724 }
725
726 switch (dev_priv->card_type) {
727 case NV_50:
728 switch (dev_priv->chipset) {
729 case 0xa3:
730 case 0xa5:
731 case 0xa8:
732 nva3_copy_create(dev);
733 break;
734 }
735 break;
736 case NV_C0:
737 if (!(nv_rd32(dev, 0x022500) & 0x00000200))
738 nvc0_copy_create(dev, 1);
739 case NV_D0:
740 if (!(nv_rd32(dev, 0x022500) & 0x00000100))
741 nvc0_copy_create(dev, 0);
742 break;
743 default:
744 break;
745 }
746
747 if (dev_priv->chipset >= 0xa3 || dev_priv->chipset == 0x98) {
748 nv84_bsp_create(dev);
749 nv84_vp_create(dev);
750 nv98_ppp_create(dev);
751 } else
752 if (dev_priv->chipset >= 0x84) {
753 nv50_mpeg_create(dev);
754 nv84_bsp_create(dev);
755 nv84_vp_create(dev);
756 } else
757 if (dev_priv->chipset >= 0x50) {
758 nv50_mpeg_create(dev);
759 } else
760 if (dev_priv->card_type == NV_40 ||
761 dev_priv->chipset == 0x31 ||
762 dev_priv->chipset == 0x34 ||
763 dev_priv->chipset == 0x36) {
764 nv31_mpeg_create(dev);
765 }
766
767 for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
768 if (dev_priv->eng[e]) {
769 ret = dev_priv->eng[e]->init(dev, e);
770 if (ret)
771 goto out_engine;
772 }
773 }
774 }
775
776 ret = nouveau_irq_init(dev);
777 if (ret)
778 goto out_engine;
779
780 ret = nouveau_display_create(dev);
781 if (ret)
782 goto out_irq;
783
784 nouveau_backlight_init(dev);
785 nouveau_pm_init(dev);
786
787 if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
788 ret = nouveau_card_channel_init(dev);
789 if (ret)
790 goto out_pm;
791 }
792
793 if (dev->mode_config.num_crtc) {
794 ret = nouveau_display_init(dev);
795 if (ret)
796 goto out_chan;
797
798 nouveau_fbcon_init(dev);
799 }
800
801 return 0;
802
803out_chan:
804 nouveau_card_channel_fini(dev);
805out_pm:
806 nouveau_pm_fini(dev);
807 nouveau_backlight_exit(dev);
808 nouveau_display_destroy(dev);
809out_irq:
810 nouveau_irq_fini(dev);
811out_engine:
812 if (!dev_priv->noaccel) {
813 for (e = e - 1; e >= 0; e--) {
814 if (!dev_priv->eng[e])
815 continue;
816 dev_priv->eng[e]->fini(dev, e, false);
817 dev_priv->eng[e]->destroy(dev,e );
818 }
819 }
820 nouveau_mem_gart_fini(dev);
821out_ttmvram:
822 nouveau_mem_vram_fini(dev);
823out_instmem:
824 engine->instmem.takedown(dev);
825out_gpuobj:
826 nouveau_gpuobj_takedown(dev);
827out_gpio:
828 nouveau_gpio_destroy(dev);
829out_vram:
830 engine->vram.takedown(dev);
831out_fb:
832 engine->fb.takedown(dev);
833out_timer:
834 engine->timer.takedown(dev);
835out_mc:
836 engine->mc.takedown(dev);
837out_bios:
838 nouveau_bios_takedown(dev);
839out_display_early:
840 engine->display.late_takedown(dev);
841out:
842 vga_switcheroo_unregister_client(dev->pdev);
843 vga_client_register(dev->pdev, NULL, NULL, NULL);
844 return ret;
845}
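nouveau_card_init() above uses the usual kernel goto-ladder error handling: each stage that initialises successfully gains a matching label in the failure path, and a failure at stage N jumps to the label that tears down stages N-1..1 in reverse order (the engine loop reuses e, so a mid-loop failure unwinds only the engines already brought up). A minimal sketch of the pattern, with hypothetical stage names:

	/* Minimal sketch of the goto-ladder unwind pattern used by
	 * nouveau_card_init(); stage_a/stage_b are hypothetical stubs.
	 */
	static int  stage_a_init(void) { return 0; }
	static void stage_a_fini(void) { }
	static int  stage_b_init(void) { return 0; }

	static int example_init(void)
	{
		int ret;

		ret = stage_a_init();
		if (ret)
			goto out;	/* nothing to unwind yet */

		ret = stage_b_init();
		if (ret)
			goto out_a;	/* undo stage A only */

		return 0;

	out_a:
		stage_a_fini();
	out:
		return ret;
	}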
846
847static void nouveau_card_takedown(struct drm_device *dev)
848{
849 struct drm_nouveau_private *dev_priv = dev->dev_private;
850 struct nouveau_engine *engine = &dev_priv->engine;
851 int e;
852
853 if (dev->mode_config.num_crtc) {
854 nouveau_fbcon_fini(dev);
855 nouveau_display_fini(dev);
856 }
857
858 nouveau_card_channel_fini(dev);
859 nouveau_pm_fini(dev);
860 nouveau_backlight_exit(dev);
861 nouveau_display_destroy(dev);
862
863 if (!dev_priv->noaccel) {
864 for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
865 if (dev_priv->eng[e]) {
866 dev_priv->eng[e]->fini(dev, e, false);
867 dev_priv->eng[e]->destroy(dev,e );
868 }
869 }
870 }
871
872 if (dev_priv->vga_ram) {
873 nouveau_bo_unpin(dev_priv->vga_ram);
874 nouveau_bo_ref(NULL, &dev_priv->vga_ram);
875 }
876
877 mutex_lock(&dev->struct_mutex);
878 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
879 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
880 mutex_unlock(&dev->struct_mutex);
881 nouveau_mem_gart_fini(dev);
882 nouveau_mem_vram_fini(dev);
883
884 engine->instmem.takedown(dev);
885 nouveau_gpuobj_takedown(dev);
886
887 nouveau_gpio_destroy(dev);
888 engine->vram.takedown(dev);
889 engine->fb.takedown(dev);
890 engine->timer.takedown(dev);
891 engine->mc.takedown(dev);
892
893 nouveau_bios_takedown(dev);
894 engine->display.late_takedown(dev);
895
896 nouveau_irq_fini(dev);
897
898 vga_switcheroo_unregister_client(dev->pdev);
899 vga_client_register(dev->pdev, NULL, NULL, NULL);
900}
901
902int
903nouveau_open(struct drm_device *dev, struct drm_file *file_priv)
904{
905 struct drm_nouveau_private *dev_priv = dev->dev_private;
906 struct nouveau_fpriv *fpriv;
907 int ret;
908
909 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
910 if (unlikely(!fpriv))
911 return -ENOMEM;
912
913 spin_lock_init(&fpriv->lock);
914 INIT_LIST_HEAD(&fpriv->channels);
915
916 if (dev_priv->card_type == NV_50) {
917 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
918 &fpriv->vm);
919 if (ret) {
920 kfree(fpriv);
921 return ret;
922 }
923 } else
924 if (dev_priv->card_type >= NV_C0) {
925 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
926 &fpriv->vm);
927 if (ret) {
928 kfree(fpriv);
929 return ret;
930 }
931 }
932
933 file_priv->driver_priv = fpriv;
934 return 0;
935}
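For scale, the nouveau_vm_new() calls above give each client a 1 TiB (1ULL << 40) GPU virtual address space, with client allocations starting at 0x0020000000 (512 MiB) on NV50 and 0x0008000000 (128 MiB) on NV_C0 and later; the low region below that offset is presumably reserved for kernel-managed mappings (an assumption, not stated in this code). A quick hedged sketch of the arithmetic:

	/* Sketch: per-client GPU VM geometry from the nouveau_vm_new()
	 * calls above.  The "reserved" interpretation of the low region
	 * is an assumption.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long vm_size   = 1ULL << 40;	/* 1 TiB of GPU VA */
		unsigned long long nv50_base = 0x0020000000ULL;	/* 512 MiB */
		unsigned long long nvc0_base = 0x0008000000ULL;	/* 128 MiB */

		printf("VA space: %llu GiB\n", vm_size >> 30);
		printf("NV50 client VA starts at %llu MiB\n", nv50_base >> 20);
		printf("NVC0 client VA starts at %llu MiB\n", nvc0_base >> 20);
		return 0;
	}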
936
937/* here a client dies, release the stuff that was allocated for its
938 * file_priv */
939void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
940{
941 nouveau_channel_cleanup(dev, file_priv);
942}
943
944void
945nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv)
946{
947 struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
948 nouveau_vm_ref(NULL, &fpriv->vm, NULL);
949 kfree(fpriv);
950}
951
952/* first module load, setup the mmio/fb mapping */
953/* KMS: we need mmio at load time, not when the first drm client opens. */
954int nouveau_firstopen(struct drm_device *dev)
955{
956 return 0;
957}
958
959/* if we have an OF card, copy vbios to RAMIN */
960static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
961{
962#if defined(__powerpc__)
963 int size, i;
964 const uint32_t *bios;
965 struct device_node *dn = pci_device_to_OF_node(dev->pdev);
966 if (!dn) {
967 NV_INFO(dev, "Unable to get the OF node\n");
968 return;
969 }
970
971 bios = of_get_property(dn, "NVDA,BMP", &size);
972 if (bios) {
973 for (i = 0; i < size; i += 4)
974 nv_wi32(dev, i, bios[i/4]);
975 NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", size);
976 } else {
977 NV_INFO(dev, "Unable to get the OF bios\n");
978 }
979#endif
980}
981
982static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev)
983{
984 struct pci_dev *pdev = dev->pdev;
985 struct apertures_struct *aper = alloc_apertures(3);
986 if (!aper)
987 return NULL;
988
989 aper->ranges[0].base = pci_resource_start(pdev, 1);
990 aper->ranges[0].size = pci_resource_len(pdev, 1);
991 aper->count = 1;
992
993 if (pci_resource_len(pdev, 2)) {
994 aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
995 aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
996 aper->count++;
997 }
998
999 if (pci_resource_len(pdev, 3)) {
1000 aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
1001 aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
1002 aper->count++;
1003 }
1004
1005 return aper;
1006}
1007
1008static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
1009{
1010 struct drm_nouveau_private *dev_priv = dev->dev_private;
1011 bool primary = false;
1012 dev_priv->apertures = nouveau_get_apertures(dev);
1013 if (!dev_priv->apertures)
1014 return -ENOMEM;
1015
1016#ifdef CONFIG_X86
1017 primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
1018#endif
1019
1020 remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
1021 return 0;
1022}
1023
1024int nouveau_load(struct drm_device *dev, unsigned long flags)
1025{
1026 struct drm_nouveau_private *dev_priv;
1027 unsigned long long offset, length;
1028 uint32_t reg0 = ~0, strap;
1029 int ret;
1030
1031 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1032 if (!dev_priv) {
1033 ret = -ENOMEM;
1034 goto err_out;
1035 }
1036 dev->dev_private = dev_priv;
1037 dev_priv->dev = dev;
1038
1039 pci_set_master(dev->pdev);
1040
1041 dev_priv->flags = flags & NOUVEAU_FLAGS;
1042
1043 NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
1044 dev->pci_vendor, dev->pci_device, dev->pdev->class);
1045
1046 /* first up, map the start of mmio and determine the chipset */
1047 dev_priv->mmio = ioremap(pci_resource_start(dev->pdev, 0), PAGE_SIZE);
1048 if (dev_priv->mmio) {
1049#ifdef __BIG_ENDIAN
1050 /* put the card into big-endian mode if it's not */
1051 if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
1052 nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
1053 DRM_MEMORYBARRIER();
1054#endif
1055
1056 /* determine chipset and derive architecture from it */
1057 reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
1058 if ((reg0 & 0x0f000000) > 0) {
1059 dev_priv->chipset = (reg0 & 0xff00000) >> 20;
1060 switch (dev_priv->chipset & 0xf0) {
1061 case 0x10:
1062 case 0x20:
1063 case 0x30:
1064 dev_priv->card_type = dev_priv->chipset & 0xf0;
1065 break;
1066 case 0x40:
1067 case 0x60:
1068 dev_priv->card_type = NV_40;
1069 break;
1070 case 0x50:
1071 case 0x80:
1072 case 0x90:
1073 case 0xa0:
1074 dev_priv->card_type = NV_50;
1075 break;
1076 case 0xc0:
1077 dev_priv->card_type = NV_C0;
1078 break;
1079 case 0xd0:
1080 dev_priv->card_type = NV_D0;
1081 break;
1082 case 0xe0:
1083 dev_priv->card_type = NV_E0;
1084 break;
1085 default:
1086 break;
1087 }
1088 } else
1089 if ((reg0 & 0xff00fff0) == 0x20004000) {
1090 if (reg0 & 0x00f00000)
1091 dev_priv->chipset = 0x05;
1092 else
1093 dev_priv->chipset = 0x04;
1094 dev_priv->card_type = NV_04;
1095 }
1096
1097 iounmap(dev_priv->mmio);
1098 }
1099
1100 if (!dev_priv->card_type) {
1101 NV_ERROR(dev, "unsupported chipset 0x%08x\n", reg0);
1102 ret = -EINVAL;
1103 goto err_priv;
1104 }
1105
1106 NV_INFO(dev, "Detected an NV%02x generation card (0x%08x)\n",
1107 dev_priv->card_type, reg0);
1108
1109 /* map the mmio regs, limiting the amount to preserve vmap space */
1110 offset = pci_resource_start(dev->pdev, 0);
1111 length = pci_resource_len(dev->pdev, 0);
1112 if (dev_priv->card_type < NV_E0)
1113 length = min(length, (unsigned long long)0x00800000);
1114
1115 dev_priv->mmio = ioremap(offset, length);
1116 if (!dev_priv->mmio) {
1117 NV_ERROR(dev, "Unable to initialize the mmio mapping. "
1118 "Please report your setup to " DRIVER_EMAIL "\n");
1119 ret = -EINVAL;
1120 goto err_priv;
1121 }
1122 NV_DEBUG(dev, "regs mapped ok at 0x%llx\n", offset);
1123
1124 /* determine frequency of timing crystal */
1125 strap = nv_rd32(dev, 0x101000);
1126 if ( dev_priv->chipset < 0x17 ||
1127 (dev_priv->chipset >= 0x20 && dev_priv->chipset <= 0x25))
1128 strap &= 0x00000040;
1129 else
1130 strap &= 0x00400040;
1131
1132 switch (strap) {
1133 case 0x00000000: dev_priv->crystal = 13500; break;
1134 case 0x00000040: dev_priv->crystal = 14318; break;
1135 case 0x00400000: dev_priv->crystal = 27000; break;
1136 case 0x00400040: dev_priv->crystal = 25000; break;
1137 }
1138
1139 NV_DEBUG(dev, "crystal freq: %dKHz\n", dev_priv->crystal);
1140
1141	/* Determine whether we'll attempt acceleration or not; some
1142	 * cards are disabled by default here because they're known to be
1143	 * non-functional, or have never been tested due to lack of hw.
1144	 */
1145 dev_priv->noaccel = !!nouveau_noaccel;
1146 if (nouveau_noaccel == -1) {
1147 switch (dev_priv->chipset) {
1148 case 0xd9: /* known broken */
1149 case 0xe4: /* needs binary driver firmware */
1150 case 0xe7: /* needs binary driver firmware */
1151 NV_INFO(dev, "acceleration disabled by default, pass "
1152 "noaccel=0 to force enable\n");
1153 dev_priv->noaccel = true;
1154 break;
1155 default:
1156 dev_priv->noaccel = false;
1157 break;
1158 }
1159 }
1160
1161 ret = nouveau_remove_conflicting_drivers(dev);
1162 if (ret)
1163 goto err_mmio;
1164
1165 /* Map PRAMIN BAR, or on older cards, the aperture within BAR0 */
1166 if (dev_priv->card_type >= NV_40) {
1167 int ramin_bar = 2;
1168 if (pci_resource_len(dev->pdev, ramin_bar) == 0)
1169 ramin_bar = 3;
1170
1171 dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
1172 dev_priv->ramin =
1173 ioremap(pci_resource_start(dev->pdev, ramin_bar),
1174 dev_priv->ramin_size);
1175 if (!dev_priv->ramin) {
1176 NV_ERROR(dev, "Failed to map PRAMIN BAR\n");
1177 ret = -ENOMEM;
1178 goto err_mmio;
1179 }
1180 } else {
1181 dev_priv->ramin_size = 1 * 1024 * 1024;
1182 dev_priv->ramin = ioremap(offset + NV_RAMIN,
1183 dev_priv->ramin_size);
1184 if (!dev_priv->ramin) {
1185 NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
1186 ret = -ENOMEM;
1187 goto err_mmio;
1188 }
1189 }
1190
1191 nouveau_OF_copy_vbios_to_ramin(dev);
1192
1193 /* Special flags */
1194 if (dev->pci_device == 0x01a0)
1195 dev_priv->flags |= NV_NFORCE;
1196 else if (dev->pci_device == 0x01f0)
1197 dev_priv->flags |= NV_NFORCE2;
1198
1199 /* For kernel modesetting, init card now and bring up fbcon */
1200 ret = nouveau_card_init(dev);
1201 if (ret)
1202 goto err_ramin;
1203
1204 return 0;
1205
1206err_ramin:
1207 iounmap(dev_priv->ramin);
1208err_mmio:
1209 iounmap(dev_priv->mmio);
1210err_priv:
1211 kfree(dev_priv);
1212 dev->dev_private = NULL;
1213err_out:
1214 return ret;
1215}
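
To make the PMC_BOOT_0 decode in nouveau_load() above easier to follow, a standalone userspace sketch; the register value 0x046700a1 is hypothetical:

	#include <stdio.h>

	/* Mirror of the chipset decode above, illustration only. */
	static unsigned int decode_chipset(unsigned int reg0)
	{
		if ((reg0 & 0x0f000000) > 0)
			return (reg0 & 0x0ff00000) >> 20;	/* NV10 and newer */
		if ((reg0 & 0xff00fff0) == 0x20004000)		/* NV04/NV05 */
			return (reg0 & 0x00f00000) ? 0x05 : 0x04;
		return 0;					/* unsupported */
	}

	int main(void)
	{
		/* hypothetical reading: decodes to chipset 0x46, card_type NV_40 */
		printf("chipset 0x%02x\n", decode_chipset(0x046700a1));
		return 0;
	}
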
1216
1217void nouveau_lastclose(struct drm_device *dev)
1218{
1219 vga_switcheroo_process_delayed_switch();
1220}
1221
1222int nouveau_unload(struct drm_device *dev)
1223{
1224 struct drm_nouveau_private *dev_priv = dev->dev_private;
1225
1226 nouveau_card_takedown(dev);
1227
1228 iounmap(dev_priv->mmio);
1229 iounmap(dev_priv->ramin);
1230
1231 kfree(dev_priv);
1232 dev->dev_private = NULL;
1233 return 0;
1234}
1235
1236/* Wait until (value(reg) & mask) == val, or the timeout expires */
1237bool
1238nouveau_wait_eq(struct drm_device *dev, uint64_t timeout,
1239 uint32_t reg, uint32_t mask, uint32_t val)
1240{
1241 struct drm_nouveau_private *dev_priv = dev->dev_private;
1242 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
1243 uint64_t start = ptimer->read(dev);
1244
1245 do {
1246 if ((nv_rd32(dev, reg) & mask) == val)
1247 return true;
1248 } while (ptimer->read(dev) - start < timeout);
1249
1250 return false;
1251}
1252
1253/* Wait until (value(reg) & mask) != val, or the timeout expires */
1254bool
1255nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
1256 uint32_t reg, uint32_t mask, uint32_t val)
1257{
1258 struct drm_nouveau_private *dev_priv = dev->dev_private;
1259 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
1260 uint64_t start = ptimer->read(dev);
1261
1262 do {
1263 if ((nv_rd32(dev, reg) & mask) != val)
1264 return true;
1265 } while (ptimer->read(dev) - start < timeout);
1266
1267 return false;
1268}
1269
1270/* Wait until cond(data) returns true, or the timeout expires */
1271bool
1272nouveau_wait_cb(struct drm_device *dev, u64 timeout,
1273 bool (*cond)(void *), void *data)
1274{
1275 struct drm_nouveau_private *dev_priv = dev->dev_private;
1276 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
1277 u64 start = ptimer->read(dev);
1278
1279 do {
1280		if (cond(data))
1281 return true;
1282 } while (ptimer->read(dev) - start < timeout);
1283
1284 return false;
1285}
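
A usage sketch for the polling helpers above; the register 0x400700, its mask, and both helper names are illustrative, not from this file. Timeouts are in PTIMER units (nanoseconds), so this waits up to two seconds:

	static bool engine_flushed(void *data)
	{
		struct drm_device *dev = data;
		/* hypothetical register/mask, purely to show the callback shape */
		return (nv_rd32(dev, 0x400700) & 0x00000001) == 0;
	}

	static int wait_for_flush(struct drm_device *dev)
	{
		if (!nouveau_wait_cb(dev, 2000000000ULL, engine_flushed, dev))
			return -EBUSY;
		return 0;
	}
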
1286
1287/* Waits for PGRAPH to go completely idle */
1288bool nouveau_wait_for_idle(struct drm_device *dev)
1289{
1290 struct drm_nouveau_private *dev_priv = dev->dev_private;
1291 uint32_t mask = ~0;
1292
1293 if (dev_priv->card_type == NV_40)
1294 mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
1295
1296 if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) {
1297 NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
1298 nv_rd32(dev, NV04_PGRAPH_STATUS));
1299 return false;
1300 }
1301
1302 return true;
1303}
1304
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
deleted file mode 100644
index 1ad411dcc57a..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ /dev/null
@@ -1,331 +0,0 @@
1/*
2 * Copyright 2010 PathScale inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres
23 */
24
25#include <linux/module.h>
26
27#include <drm/drmP.h>
28
29#include "nouveau_drv.h"
30#include "nouveau_pm.h"
31
32static void
33nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
34{
35 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
37 struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
38 struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
39 int i, headerlen, recordlen, entries;
40
41 if (!temp) {
42 NV_DEBUG(dev, "temperature table pointer invalid\n");
43 return;
44 }
45
46	/* Set the sensor's default constants */
47 sensor->offset_constant = 0;
48 sensor->offset_mult = 0;
49 sensor->offset_div = 1;
50 sensor->slope_mult = 1;
51 sensor->slope_div = 1;
52
53 /* Set the default temperature thresholds */
54 temps->critical = 110;
55 temps->down_clock = 100;
56 temps->fan_boost = 90;
57
58 /* Set the default range for the pwm fan */
59 pm->fan.min_duty = 30;
60 pm->fan.max_duty = 100;
61
62 /* Set the known default values to setup the temperature sensor */
63 if (dev_priv->card_type >= NV_40) {
64 switch (dev_priv->chipset) {
65 case 0x43:
66 sensor->offset_mult = 32060;
67 sensor->offset_div = 1000;
68 sensor->slope_mult = 792;
69 sensor->slope_div = 1000;
70 break;
71
72 case 0x44:
73 case 0x47:
74 case 0x4a:
75 sensor->offset_mult = 27839;
76 sensor->offset_div = 1000;
77 sensor->slope_mult = 780;
78 sensor->slope_div = 1000;
79 break;
80
81 case 0x46:
82 sensor->offset_mult = -24775;
83 sensor->offset_div = 100;
84 sensor->slope_mult = 467;
85 sensor->slope_div = 10000;
86 break;
87
88 case 0x49:
89 sensor->offset_mult = -25051;
90 sensor->offset_div = 100;
91 sensor->slope_mult = 458;
92 sensor->slope_div = 10000;
93 break;
94
95 case 0x4b:
96 sensor->offset_mult = -24088;
97 sensor->offset_div = 100;
98 sensor->slope_mult = 442;
99 sensor->slope_div = 10000;
100 break;
101
102 case 0x50:
103 sensor->offset_mult = -22749;
104 sensor->offset_div = 100;
105 sensor->slope_mult = 431;
106 sensor->slope_div = 10000;
107 break;
108
109 case 0x67:
110 sensor->offset_mult = -26149;
111 sensor->offset_div = 100;
112 sensor->slope_mult = 484;
113 sensor->slope_div = 10000;
114 break;
115 }
116 }
117
118 headerlen = temp[1];
119 recordlen = temp[2];
120 entries = temp[3];
121 temp = temp + headerlen;
122
123 /* Read the entries from the table */
124 for (i = 0; i < entries; i++) {
125 s16 value = ROM16(temp[1]);
126
127 switch (temp[0]) {
128 case 0x01:
129 if ((value & 0x8f) == 0)
130 sensor->offset_constant = (value >> 9) & 0x7f;
131 break;
132
133 case 0x04:
134 if ((value & 0xf00f) == 0xa000) /* core */
135 temps->critical = (value&0x0ff0) >> 4;
136 break;
137
138 case 0x07:
139 if ((value & 0xf00f) == 0xa000) /* core */
140 temps->down_clock = (value&0x0ff0) >> 4;
141 break;
142
143 case 0x08:
144 if ((value & 0xf00f) == 0xa000) /* core */
145 temps->fan_boost = (value&0x0ff0) >> 4;
146 break;
147
148 case 0x10:
149 sensor->offset_mult = value;
150 break;
151
152 case 0x11:
153 sensor->offset_div = value;
154 break;
155
156 case 0x12:
157 sensor->slope_mult = value;
158 break;
159
160 case 0x13:
161 sensor->slope_div = value;
162 break;
163 case 0x22:
164 pm->fan.min_duty = value & 0xff;
165 pm->fan.max_duty = (value & 0xff00) >> 8;
166 break;
167 case 0x26:
168 pm->fan.pwm_freq = value;
169 break;
170 }
171 temp += recordlen;
172 }
173
174 nouveau_temp_safety_checks(dev);
175
176 /* check the fan min/max settings */
177 if (pm->fan.min_duty < 10)
178 pm->fan.min_duty = 10;
179 if (pm->fan.max_duty > 100)
180 pm->fan.max_duty = 100;
181 if (pm->fan.max_duty < pm->fan.min_duty)
182 pm->fan.max_duty = pm->fan.min_duty;
183}
184
185static int
186nv40_sensor_setup(struct drm_device *dev)
187{
188 struct drm_nouveau_private *dev_priv = dev->dev_private;
189 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
190 struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
191 s32 offset = sensor->offset_mult / sensor->offset_div;
192 s32 sensor_calibration;
193
194 /* set up the sensors */
195 sensor_calibration = 120 - offset - sensor->offset_constant;
196 sensor_calibration = sensor_calibration * sensor->slope_div /
197 sensor->slope_mult;
198
199 if (dev_priv->chipset >= 0x46)
200 sensor_calibration |= 0x80000000;
201 else
202 sensor_calibration |= 0x10000000;
203
204 nv_wr32(dev, 0x0015b0, sensor_calibration);
205
206 /* Wait for the sensor to update */
207 msleep(5);
208
209 /* read */
210 return nv_rd32(dev, 0x0015b4) & 0x1fff;
211}
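
Worked through with the NV46 constants from the table above (offset_mult -24775, offset_div 100, slope_mult 467, slope_div 10000, offset_constant 0):

	/* offset             = -24775 / 100       = -247  (C truncation)
	 * sensor_calibration = 120 - (-247) - 0   =  367
	 * sensor_calibration = 367 * 10000 / 467  = 7858  (0x1eb2)
	 * chipset 0x46 >= 0x46, so 0x1eb2 | 0x80000000 = 0x80001eb2
	 * is what lands in register 0x0015b0.
	 */
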
212
213int
214nv40_temp_get(struct drm_device *dev)
215{
216 struct drm_nouveau_private *dev_priv = dev->dev_private;
217 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
218 struct nouveau_pm_temp_sensor_constants *sensor = &pm->sensor_constants;
219 int offset = sensor->offset_mult / sensor->offset_div;
220 int core_temp;
221
222 if (dev_priv->card_type >= NV_50) {
223 core_temp = nv_rd32(dev, 0x20008);
224 } else {
225 core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff;
226		/* Set up the sensor if the temperature is 0 */
227 if (core_temp == 0)
228 core_temp = nv40_sensor_setup(dev);
229 }
230
231 core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
232 core_temp = core_temp + offset + sensor->offset_constant;
233
234 return core_temp;
235}
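
And the forward conversion in nv40_temp_get(), again with the NV46 constants; the raw reading 6500 is hypothetical:

	/* core_temp = 6500 * 467 / 10000  = 303
	 * core_temp = 303 + (-247) + 0    =  56  degrees C
	 */
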
236
237int
238nv84_temp_get(struct drm_device *dev)
239{
240 return nv_rd32(dev, 0x20400);
241}
242
243void
244nouveau_temp_safety_checks(struct drm_device *dev)
245{
246 struct drm_nouveau_private *dev_priv = dev->dev_private;
247 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
248 struct nouveau_pm_threshold_temp *temps = &pm->threshold_temp;
249
250 if (temps->critical > 120)
251 temps->critical = 120;
252 else if (temps->critical < 80)
253 temps->critical = 80;
254
255 if (temps->down_clock > 110)
256 temps->down_clock = 110;
257 else if (temps->down_clock < 60)
258 temps->down_clock = 60;
259
260 if (temps->fan_boost > 100)
261 temps->fan_boost = 100;
262 else if (temps->fan_boost < 40)
263 temps->fan_boost = 40;
264}
265
266static bool
267probe_monitoring_device(struct nouveau_i2c_chan *i2c,
268 struct i2c_board_info *info)
269{
270 struct i2c_client *client;
271
272 request_module("%s%s", I2C_MODULE_PREFIX, info->type);
273
274 client = i2c_new_device(&i2c->adapter, info);
275 if (!client)
276 return false;
277
278 if (!client->driver || client->driver->detect(client, info)) {
279 i2c_unregister_device(client);
280 return false;
281 }
282
283 return true;
284}
285
286static void
287nouveau_temp_probe_i2c(struct drm_device *dev)
288{
289 struct i2c_board_info info[] = {
290 { I2C_BOARD_INFO("w83l785ts", 0x2d) },
291 { I2C_BOARD_INFO("w83781d", 0x2d) },
292 { I2C_BOARD_INFO("adt7473", 0x2e) },
293 { I2C_BOARD_INFO("f75375", 0x2e) },
294 { I2C_BOARD_INFO("lm99", 0x4c) },
295 { }
296 };
297
298 nouveau_i2c_identify(dev, "monitoring device", info,
299 probe_monitoring_device, NV_I2C_DEFAULT(0));
300}
301
302void
303nouveau_temp_init(struct drm_device *dev)
304{
305 struct drm_nouveau_private *dev_priv = dev->dev_private;
306 struct nvbios *bios = &dev_priv->vbios;
307 struct bit_entry P;
308 u8 *temp = NULL;
309
310 if (bios->type == NVBIOS_BIT) {
311 if (bit_table(dev, 'P', &P))
312 return;
313
314 if (P.version == 1)
315 temp = ROMPTR(dev, P.data[12]);
316 else if (P.version == 2)
317 temp = ROMPTR(dev, P.data[16]);
318 else
319 NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
320
321 nouveau_temp_vbios_parse(dev, temp);
322 }
323
324 nouveau_temp_probe_i2c(dev);
325}
326
327void
328nouveau_temp_fini(struct drm_device *dev)
329{
330
331}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 48de8dd69583..9be9cb58e19b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -24,21 +24,253 @@
24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 */ 25 */
26 26
27#include <drm/drmP.h> 27#include <subdev/fb.h>
28#include <subdev/vm.h>
29#include <subdev/instmem.h>
28 30
29#include "nouveau_drv.h" 31#include "nouveau_drm.h"
32#include "nouveau_ttm.h"
33#include "nouveau_gem.h"
34
35static int
36nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
37{
38 /* nothing to do */
39 return 0;
40}
41
42static int
43nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
44{
45 /* nothing to do */
46 return 0;
47}
48
49static inline void
50nouveau_mem_node_cleanup(struct nouveau_mem *node)
51{
52 if (node->vma[0].node) {
53 nouveau_vm_unmap(&node->vma[0]);
54 nouveau_vm_put(&node->vma[0]);
55 }
56
57 if (node->vma[1].node) {
58 nouveau_vm_unmap(&node->vma[1]);
59 nouveau_vm_put(&node->vma[1]);
60 }
61}
62
63static void
64nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
65 struct ttm_mem_reg *mem)
66{
67 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
68 struct nouveau_fb *pfb = nouveau_fb(drm->device);
69 nouveau_mem_node_cleanup(mem->mm_node);
70 pfb->ram.put(pfb, (struct nouveau_mem **)&mem->mm_node);
71}
72
73static int
74nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
75 struct ttm_buffer_object *bo,
76 struct ttm_placement *placement,
77 struct ttm_mem_reg *mem)
78{
79 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
80 struct nouveau_fb *pfb = nouveau_fb(drm->device);
81 struct nouveau_bo *nvbo = nouveau_bo(bo);
82 struct nouveau_mem *node;
83 u32 size_nc = 0;
84 int ret;
85
86 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
87 size_nc = 1 << nvbo->page_shift;
88
89 ret = pfb->ram.get(pfb, mem->num_pages << PAGE_SHIFT,
90 mem->page_alignment << PAGE_SHIFT, size_nc,
91 (nvbo->tile_flags >> 8) & 0x3ff, &node);
92 if (ret) {
93 mem->mm_node = NULL;
94 return (ret == -ENOSPC) ? 0 : ret;
95 }
96
97 node->page_shift = nvbo->page_shift;
98
99 mem->mm_node = node;
100 mem->start = node->offset >> PAGE_SHIFT;
101 return 0;
102}
103
104static void
105nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
106{
107 struct nouveau_mm *mm = man->priv;
108 struct nouveau_mm_node *r;
109 u32 total = 0, free = 0;
110
111 mutex_lock(&mm->mutex);
112 list_for_each_entry(r, &mm->nodes, nl_entry) {
113 printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
114 prefix, r->type, ((u64)r->offset << 12),
115 (((u64)r->offset + r->length) << 12));
116
117 total += r->length;
118 if (!r->type)
119 free += r->length;
120 }
121 mutex_unlock(&mm->mutex);
122
123 printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
124 prefix, (u64)total << 12, (u64)free << 12);
125 printk(KERN_DEBUG "%s block: 0x%08x\n",
126 prefix, mm->block_size << 12);
127}
128
129const struct ttm_mem_type_manager_func nouveau_vram_manager = {
130 nouveau_vram_manager_init,
131 nouveau_vram_manager_fini,
132 nouveau_vram_manager_new,
133 nouveau_vram_manager_del,
134 nouveau_vram_manager_debug
135};
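
For context, a rough sketch of how TTM picks this table up; the wiring lives in the bo driver's init_mem_type() hook (outside this hunk), roughly:

	case TTM_PL_VRAM:
		man->func = &nouveau_vram_manager;
		break;
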
136
137static int
138nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
139{
140 return 0;
141}
142
143static int
144nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
145{
146 return 0;
147}
148
149static void
150nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
151 struct ttm_mem_reg *mem)
152{
153 nouveau_mem_node_cleanup(mem->mm_node);
154 kfree(mem->mm_node);
155 mem->mm_node = NULL;
156}
157
158static int
159nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
160 struct ttm_buffer_object *bo,
161 struct ttm_placement *placement,
162 struct ttm_mem_reg *mem)
163{
164 struct nouveau_mem *node;
165
166 if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
167 return -ENOMEM;
168
169 node = kzalloc(sizeof(*node), GFP_KERNEL);
170 if (!node)
171 return -ENOMEM;
172 node->page_shift = 12;
173
174 mem->mm_node = node;
175 mem->start = 0;
176 return 0;
177}
178
179static void
180nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
181{
182}
183
184const struct ttm_mem_type_manager_func nouveau_gart_manager = {
185 nouveau_gart_manager_init,
186 nouveau_gart_manager_fini,
187 nouveau_gart_manager_new,
188 nouveau_gart_manager_del,
189 nouveau_gart_manager_debug
190};
191
192#include <core/subdev/vm/nv04.h>
193static int
194nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
195{
196 struct nouveau_drm *drm = nouveau_bdev(man->bdev);
197 struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
198 struct nv04_vmmgr_priv *priv = (void *)vmm;
199 struct nouveau_vm *vm = NULL;
200 nouveau_vm_ref(priv->vm, &vm, NULL);
201 man->priv = vm;
202 return 0;
203}
204
205static int
206nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
207{
208 struct nouveau_vm *vm = man->priv;
209 nouveau_vm_ref(NULL, &vm, NULL);
210 man->priv = NULL;
211 return 0;
212}
213
214static void
215nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
216{
217 struct nouveau_mem *node = mem->mm_node;
218 if (node->vma[0].node)
219 nouveau_vm_put(&node->vma[0]);
220 kfree(mem->mm_node);
221 mem->mm_node = NULL;
222}
223
224static int
225nv04_gart_manager_new(struct ttm_mem_type_manager *man,
226 struct ttm_buffer_object *bo,
227 struct ttm_placement *placement,
228 struct ttm_mem_reg *mem)
229{
230 struct nouveau_mem *node;
231 int ret;
232
233 node = kzalloc(sizeof(*node), GFP_KERNEL);
234 if (!node)
235 return -ENOMEM;
236
237 node->page_shift = 12;
238
239 ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
240 NV_MEM_ACCESS_RW, &node->vma[0]);
241 if (ret) {
242 kfree(node);
243 return ret;
244 }
245
246 mem->mm_node = node;
247 mem->start = node->vma[0].offset >> PAGE_SHIFT;
248 return 0;
249}
250
251static void
252nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
253{
254}
255
256const struct ttm_mem_type_manager_func nv04_gart_manager = {
257 nv04_gart_manager_init,
258 nv04_gart_manager_fini,
259 nv04_gart_manager_new,
260 nv04_gart_manager_del,
261 nv04_gart_manager_debug
262};
30 263
31int 264int
32nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) 265nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
33{ 266{
34 struct drm_file *file_priv = filp->private_data; 267 struct drm_file *file_priv = filp->private_data;
35 struct drm_nouveau_private *dev_priv = 268 struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
36 file_priv->minor->dev->dev_private;
37 269
38 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) 270 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
39 return drm_mmap(filp, vma); 271 return drm_mmap(filp, vma);
40 272
41 return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev); 273 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
42} 274}
43 275
44static int 276static int
@@ -54,12 +286,12 @@ nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
54} 286}
55 287
56int 288int
57nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv) 289nouveau_ttm_global_init(struct nouveau_drm *drm)
58{ 290{
59 struct drm_global_reference *global_ref; 291 struct drm_global_reference *global_ref;
60 int ret; 292 int ret;
61 293
62 global_ref = &dev_priv->ttm.mem_global_ref; 294 global_ref = &drm->ttm.mem_global_ref;
63 global_ref->global_type = DRM_GLOBAL_TTM_MEM; 295 global_ref->global_type = DRM_GLOBAL_TTM_MEM;
64 global_ref->size = sizeof(struct ttm_mem_global); 296 global_ref->size = sizeof(struct ttm_mem_global);
65 global_ref->init = &nouveau_ttm_mem_global_init; 297 global_ref->init = &nouveau_ttm_mem_global_init;
@@ -68,12 +300,12 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
68 ret = drm_global_item_ref(global_ref); 300 ret = drm_global_item_ref(global_ref);
69 if (unlikely(ret != 0)) { 301 if (unlikely(ret != 0)) {
70 DRM_ERROR("Failed setting up TTM memory accounting\n"); 302 DRM_ERROR("Failed setting up TTM memory accounting\n");
71 dev_priv->ttm.mem_global_ref.release = NULL; 303 drm->ttm.mem_global_ref.release = NULL;
72 return ret; 304 return ret;
73 } 305 }
74 306
75 dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object; 307 drm->ttm.bo_global_ref.mem_glob = global_ref->object;
76 global_ref = &dev_priv->ttm.bo_global_ref.ref; 308 global_ref = &drm->ttm.bo_global_ref.ref;
77 global_ref->global_type = DRM_GLOBAL_TTM_BO; 309 global_ref->global_type = DRM_GLOBAL_TTM_BO;
78 global_ref->size = sizeof(struct ttm_bo_global); 310 global_ref->size = sizeof(struct ttm_bo_global);
79 global_ref->init = &ttm_bo_global_init; 311 global_ref->init = &ttm_bo_global_init;
@@ -82,8 +314,8 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
82 ret = drm_global_item_ref(global_ref); 314 ret = drm_global_item_ref(global_ref);
83 if (unlikely(ret != 0)) { 315 if (unlikely(ret != 0)) {
84 DRM_ERROR("Failed setting up TTM BO subsystem\n"); 316 DRM_ERROR("Failed setting up TTM BO subsystem\n");
85 drm_global_item_unref(&dev_priv->ttm.mem_global_ref); 317 drm_global_item_unref(&drm->ttm.mem_global_ref);
86 dev_priv->ttm.mem_global_ref.release = NULL; 318 drm->ttm.mem_global_ref.release = NULL;
87 return ret; 319 return ret;
88 } 320 }
89 321
@@ -91,13 +323,101 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
91} 323}
92 324
93void 325void
94nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv) 326nouveau_ttm_global_release(struct nouveau_drm *drm)
95{ 327{
96 if (dev_priv->ttm.mem_global_ref.release == NULL) 328 if (drm->ttm.mem_global_ref.release == NULL)
97 return; 329 return;
98 330
99 drm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref); 331 drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
100 drm_global_item_unref(&dev_priv->ttm.mem_global_ref); 332 drm_global_item_unref(&drm->ttm.mem_global_ref);
101 dev_priv->ttm.mem_global_ref.release = NULL; 333 drm->ttm.mem_global_ref.release = NULL;
102} 334}
103 335
336int
337nouveau_ttm_init(struct nouveau_drm *drm)
338{
339 struct drm_device *dev = drm->dev;
340 u32 bits;
341 int ret;
342
343 bits = nouveau_vmmgr(drm->device)->dma_bits;
344 if ( drm->agp.stat == ENABLED ||
345 !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
346 bits = 32;
347
348 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
349 if (ret)
350 return ret;
351
352 ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
353 if (ret)
354 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
355
356 ret = nouveau_ttm_global_init(drm);
357 if (ret)
358 return ret;
359
360 ret = ttm_bo_device_init(&drm->ttm.bdev,
361 drm->ttm.bo_global_ref.ref.object,
362 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
363				 bits <= 32);
364 if (ret) {
365 NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
366 return ret;
367 }
368
369 /* VRAM init */
370 drm->gem.vram_available = nouveau_fb(drm->device)->ram.size;
371 drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;
372
373 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
374 drm->gem.vram_available >> PAGE_SHIFT);
375 if (ret) {
376 NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
377 return ret;
378 }
379
380 drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
381 pci_resource_len(dev->pdev, 1),
382 DRM_MTRR_WC);
383
384 /* GART init */
385 if (drm->agp.stat != ENABLED) {
386 drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
387 if (drm->gem.gart_available > 512 * 1024 * 1024)
388 drm->gem.gart_available = 512 * 1024 * 1024;
389 } else {
390 drm->gem.gart_available = drm->agp.size;
391 }
392
393 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
394 drm->gem.gart_available >> PAGE_SHIFT);
395 if (ret) {
396 NV_ERROR(drm, "GART mm init failed, %d\n", ret);
397 return ret;
398 }
399
400 NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
401 NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
402 return 0;
403}
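
A worked pass through the sizing above, with hypothetical numbers:

	/* ram.size = 256 MiB, instmem reserved = 1 MiB
	 *   vram_available = 255 MiB  ->  255 MiB >> PAGE_SHIFT = 65280 pages
	 * handed to ttm_bo_init_mm(); without AGP the GART is capped
	 * at 512 MiB. */
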
404
405void
406nouveau_ttm_fini(struct nouveau_drm *drm)
407{
408 mutex_lock(&drm->dev->struct_mutex);
409 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
410 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
411 mutex_unlock(&drm->dev->struct_mutex);
412
413 ttm_bo_device_release(&drm->ttm.bdev);
414
415 nouveau_ttm_global_release(drm);
416
417 if (drm->ttm.mtrr >= 0) {
418 drm_mtrr_del(drm->ttm.mtrr,
419 pci_resource_start(drm->dev->pdev, 1),
420 pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC);
421 drm->ttm.mtrr = -1;
422 }
423}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
new file mode 100644
index 000000000000..25b0de413352
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -0,0 +1,25 @@
1#ifndef __NOUVEAU_TTM_H__
2#define __NOUVEAU_TTM_H__
3
4static inline struct nouveau_drm *
5nouveau_bdev(struct ttm_bo_device *bd)
6{
7 return container_of(bd, struct nouveau_drm, ttm.bdev);
8}
9
10extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
11extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
12extern const struct ttm_mem_type_manager_func nv04_gart_manager;
13
14struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *,
15 unsigned long size, u32 page_flags,
16 struct page *dummy_read_page);
17
18int nouveau_ttm_init(struct nouveau_drm *drm);
19void nouveau_ttm_fini(struct nouveau_drm *drm);
20int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
21
22int nouveau_ttm_global_init(struct nouveau_drm *);
23void nouveau_ttm_global_release(struct nouveau_drm *);
24
25#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
deleted file mode 100644
index b97719fbb739..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_util.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/*
2 * Copyright (C) 2010 Nouveau Project
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#ifndef __NOUVEAU_UTIL_H__
29#define __NOUVEAU_UTIL_H__
30
31struct nouveau_bitfield {
32 u32 mask;
33 const char *name;
34};
35
36struct nouveau_enum {
37 u32 value;
38 const char *name;
39 void *data;
40};
41
42void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
43void nouveau_enum_print(const struct nouveau_enum *, u32 value);
44const struct nouveau_enum *
45nouveau_enum_find(const struct nouveau_enum *, u32 value);
46
47int nouveau_ratelimit(void);
48
49#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
new file mode 100644
index 000000000000..6f0ac64873df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -0,0 +1,99 @@
1#include <linux/vgaarb.h>
2#include <linux/vga_switcheroo.h>
3
4#include <drm/drmP.h>
5#include <drm/drm_crtc_helper.h>
6
7#include "nouveau_drm.h"
8#include "nouveau_acpi.h"
9#include "nouveau_fbcon.h"
10#include "nouveau_vga.h"
11
12static unsigned int
13nouveau_vga_set_decode(void *priv, bool state)
14{
15 struct nouveau_device *device = nouveau_dev(priv);
16
17 if (device->chipset >= 0x40)
18 nv_wr32(device, 0x088054, state);
19 else
20 nv_wr32(device, 0x001854, state);
21
22 if (state)
23 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
24 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
25 else
26 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
27}
28
29static void
30nouveau_switcheroo_set_state(struct pci_dev *pdev,
31 enum vga_switcheroo_state state)
32{
33 struct drm_device *dev = pci_get_drvdata(pdev);
34 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
35
36 if (state == VGA_SWITCHEROO_ON) {
37 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
38 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
39 nouveau_drm_resume(pdev);
40 drm_kms_helper_poll_enable(dev);
41 dev->switch_power_state = DRM_SWITCH_POWER_ON;
42 } else {
43 printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
44 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
45 drm_kms_helper_poll_disable(dev);
46 nouveau_switcheroo_optimus_dsm();
47 nouveau_drm_suspend(pdev, pmm);
48 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
49 }
50}
51
52static void
53nouveau_switcheroo_reprobe(struct pci_dev *pdev)
54{
55 struct drm_device *dev = pci_get_drvdata(pdev);
56 nouveau_fbcon_output_poll_changed(dev);
57}
58
59static bool
60nouveau_switcheroo_can_switch(struct pci_dev *pdev)
61{
62 struct drm_device *dev = pci_get_drvdata(pdev);
63 bool can_switch;
64
65 spin_lock(&dev->count_lock);
66 can_switch = (dev->open_count == 0);
67 spin_unlock(&dev->count_lock);
68 return can_switch;
69}
70
71static const struct vga_switcheroo_client_ops
72nouveau_switcheroo_ops = {
73 .set_gpu_state = nouveau_switcheroo_set_state,
74 .reprobe = nouveau_switcheroo_reprobe,
75 .can_switch = nouveau_switcheroo_can_switch,
76};
77
78void
79nouveau_vga_init(struct nouveau_drm *drm)
80{
81 struct drm_device *dev = drm->dev;
82 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
83 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
84}
85
86void
87nouveau_vga_fini(struct nouveau_drm *drm)
88{
89 struct drm_device *dev = drm->dev;
90 vga_switcheroo_unregister_client(dev->pdev);
91 vga_client_register(dev->pdev, NULL, NULL, NULL);
92}
93
94
95void
96nouveau_vga_lastclose(struct drm_device *dev)
97{
98 vga_switcheroo_process_delayed_switch();
99}
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.h b/drivers/gpu/drm/nouveau/nouveau_vga.h
new file mode 100644
index 000000000000..ea3ad6974c65
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.h
@@ -0,0 +1,8 @@
1#ifndef __NOUVEAU_VGA_H__
2#define __NOUVEAU_VGA_H__
3
4void nouveau_vga_init(struct nouveau_drm *);
5void nouveau_vga_fini(struct nouveau_drm *);
6void nouveau_vga_lastclose(struct drm_device *dev);
7
8#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
index fbc3a1efd501..9976414cbe50 100644
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -24,18 +24,21 @@
24 24
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26 26
27#include "nouveau_drv.h" 27#include "nouveau_drm.h"
28#include "nouveau_pm.h" 28#include "nouveau_pm.h"
29#include "nouveau_gpio.h"
30 29
31static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 }; 30#include <subdev/bios/gpio.h>
31#include <subdev/gpio.h>
32
33static const enum dcb_gpio_func_name vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
32static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]); 34static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
33 35
34int 36int
35nouveau_voltage_gpio_get(struct drm_device *dev) 37nouveau_voltage_gpio_get(struct drm_device *dev)
36{ 38{
37 struct drm_nouveau_private *dev_priv = dev->dev_private; 39 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
38 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; 40 struct nouveau_device *device = nouveau_dev(dev);
41 struct nouveau_gpio *gpio = nouveau_gpio(device);
39 u8 vid = 0; 42 u8 vid = 0;
40 int i; 43 int i;
41 44
@@ -43,7 +46,7 @@ nouveau_voltage_gpio_get(struct drm_device *dev)
43 if (!(volt->vid_mask & (1 << i))) 46 if (!(volt->vid_mask & (1 << i)))
44 continue; 47 continue;
45 48
46 vid |= nouveau_gpio_func_get(dev, vidtag[i]) << i; 49 vid |= gpio->get(gpio, 0, vidtag[i], 0xff) << i;
47 } 50 }
48 51
49 return nouveau_volt_lvl_lookup(dev, vid); 52 return nouveau_volt_lvl_lookup(dev, vid);
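
A worked pass through the VID loop above (board values hypothetical):

	/* with volt->vid_mask = 0x7 and the three VID gpios reading 1, 0, 1,
	 * the loop assembles vid = (1 << 0) | (0 << 1) | (1 << 2) = 5, which
	 * nouveau_volt_lvl_lookup() then maps back to a voltage. */
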
@@ -52,8 +55,9 @@ nouveau_voltage_gpio_get(struct drm_device *dev)
52int 55int
53nouveau_voltage_gpio_set(struct drm_device *dev, int voltage) 56nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
54{ 57{
55 struct drm_nouveau_private *dev_priv = dev->dev_private; 58 struct nouveau_device *device = nouveau_dev(dev);
56 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage; 59 struct nouveau_gpio *gpio = nouveau_gpio(device);
60 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
57 int vid, i; 61 int vid, i;
58 62
59 vid = nouveau_volt_vid_lookup(dev, voltage); 63 vid = nouveau_volt_vid_lookup(dev, voltage);
@@ -64,7 +68,7 @@ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
64 if (!(volt->vid_mask & (1 << i))) 68 if (!(volt->vid_mask & (1 << i)))
65 continue; 69 continue;
66 70
67 nouveau_gpio_func_set(dev, vidtag[i], !!(vid & (1 << i))); 71 gpio->set(gpio, 0, vidtag[i], 0xff, !!(vid & (1 << i)));
68 } 72 }
69 73
70 return 0; 74 return 0;
@@ -73,8 +77,7 @@ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
73int 77int
74nouveau_volt_vid_lookup(struct drm_device *dev, int voltage) 78nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
75{ 79{
76 struct drm_nouveau_private *dev_priv = dev->dev_private; 80 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
77 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
78 int i; 81 int i;
79 82
80 for (i = 0; i < volt->nr_level; i++) { 83 for (i = 0; i < volt->nr_level; i++) {
@@ -88,8 +91,7 @@ nouveau_volt_vid_lookup(struct drm_device *dev, int voltage)
88int 91int
89nouveau_volt_lvl_lookup(struct drm_device *dev, int vid) 92nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
90{ 93{
91 struct drm_nouveau_private *dev_priv = dev->dev_private; 94 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
92 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
93 int i; 95 int i;
94 96
95 for (i = 0; i < volt->nr_level; i++) { 97 for (i = 0; i < volt->nr_level; i++) {
@@ -103,10 +105,12 @@ nouveau_volt_lvl_lookup(struct drm_device *dev, int vid)
103void 105void
104nouveau_volt_init(struct drm_device *dev) 106nouveau_volt_init(struct drm_device *dev)
105{ 107{
106 struct drm_nouveau_private *dev_priv = dev->dev_private; 108 struct nouveau_drm *drm = nouveau_drm(dev);
107 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 109 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
110 struct nouveau_pm *pm = nouveau_pm(dev);
108 struct nouveau_pm_voltage *voltage = &pm->voltage; 111 struct nouveau_pm_voltage *voltage = &pm->voltage;
109 struct nvbios *bios = &dev_priv->vbios; 112 struct nvbios *bios = &drm->vbios;
113 struct dcb_gpio_func func;
110 struct bit_entry P; 114 struct bit_entry P;
111 u8 *volt = NULL, *entry; 115 u8 *volt = NULL, *entry;
112 int i, headerlen, recordlen, entries, vidmask, vidshift; 116 int i, headerlen, recordlen, entries, vidmask, vidshift;
@@ -121,11 +125,11 @@ nouveau_volt_init(struct drm_device *dev)
121 if (P.version == 2) 125 if (P.version == 2)
122 volt = ROMPTR(dev, P.data[12]); 126 volt = ROMPTR(dev, P.data[12]);
123 else { 127 else {
124 NV_WARN(dev, "unknown volt for BIT P %d\n", P.version); 128 NV_WARN(drm, "unknown volt for BIT P %d\n", P.version);
125 } 129 }
126 } else { 130 } else {
127 if (bios->data[bios->offset + 6] < 0x27) { 131 if (bios->data[bios->offset + 6] < 0x27) {
128 NV_DEBUG(dev, "BMP version too old for voltage\n"); 132 NV_DEBUG(drm, "BMP version too old for voltage\n");
129 return; 133 return;
130 } 134 }
131 135
@@ -133,7 +137,7 @@ nouveau_volt_init(struct drm_device *dev)
133 } 137 }
134 138
135 if (!volt) { 139 if (!volt) {
136 NV_DEBUG(dev, "voltage table pointer invalid\n"); 140 NV_DEBUG(drm, "voltage table pointer invalid\n");
137 return; 141 return;
138 } 142 }
139 143
@@ -177,7 +181,7 @@ nouveau_volt_init(struct drm_device *dev)
177 vidshift = 0; 181 vidshift = 0;
178 break; 182 break;
179 default: 183 default:
180 NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]); 184 NV_WARN(drm, "voltage table 0x%02x unknown\n", volt[0]);
181 return; 185 return;
182 } 186 }
183 187
@@ -189,12 +193,12 @@ nouveau_volt_init(struct drm_device *dev)
189 i = 0; 193 i = 0;
190 while (vidmask) { 194 while (vidmask) {
191 if (i > nr_vidtag) { 195 if (i > nr_vidtag) {
192 NV_DEBUG(dev, "vid bit %d unknown\n", i); 196 NV_DEBUG(drm, "vid bit %d unknown\n", i);
193 return; 197 return;
194 } 198 }
195 199
196 if (!nouveau_gpio_func_valid(dev, vidtag[i])) { 200 if (gpio && gpio->find(gpio, 0, vidtag[i], 0xff, &func)) {
197 NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i); 201 NV_DEBUG(drm, "vid bit %d has no gpio tag\n", i);
198 return; 202 return;
199 } 203 }
200 204
@@ -240,8 +244,7 @@ nouveau_volt_init(struct drm_device *dev)
240void 244void
241nouveau_volt_fini(struct drm_device *dev) 245nouveau_volt_fini(struct drm_device *dev)
242{ 246{
243 struct drm_nouveau_private *dev_priv = dev->dev_private; 247 struct nouveau_pm_voltage *volt = &nouveau_pm(dev)->voltage;
244 struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
245 248
246 kfree(volt->level); 249 kfree(volt->level);
247} 250}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 37d5b5bf7587..82a0d9c6cda3 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -26,14 +26,20 @@
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h> 27#include <drm/drm_crtc_helper.h>
28 28
29#include "nouveau_drv.h" 29#include "nouveau_drm.h"
30#include "nouveau_reg.h"
31#include "nouveau_bo.h"
32#include "nouveau_gem.h"
30#include "nouveau_encoder.h" 33#include "nouveau_encoder.h"
31#include "nouveau_connector.h" 34#include "nouveau_connector.h"
32#include "nouveau_crtc.h" 35#include "nouveau_crtc.h"
33#include "nouveau_fb.h"
34#include "nouveau_hw.h" 36#include "nouveau_hw.h"
35#include "nvreg.h" 37#include "nvreg.h"
36#include "nouveau_fbcon.h" 38#include "nouveau_fbcon.h"
39#include "nv04_display.h"
40
41#include <subdev/bios/pll.h>
42#include <subdev/clock.h>
37 43
38static int 44static int
39nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 45nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
@@ -49,8 +55,8 @@ crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int in
49static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level) 55static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
50{ 56{
51 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 57 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
52 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; 58 struct drm_device *dev = crtc->dev;
53 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 59 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
54 60
55 regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level; 61 regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level;
56 if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) { 62 if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) {
@@ -64,8 +70,8 @@ static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
64static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level) 70static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
65{ 71{
66 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 72 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
67 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; 73 struct drm_device *dev = crtc->dev;
68 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 74 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
69 75
70 nv_crtc->sharpness = level; 76 nv_crtc->sharpness = level;
71 if (level < 0) /* blur is in hw range 0x3f -> 0x20 */ 77 if (level < 0) /* blur is in hw range 0x3f -> 0x20 */
@@ -103,14 +109,17 @@ static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
103static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock) 109static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock)
104{ 110{
105 struct drm_device *dev = crtc->dev; 111 struct drm_device *dev = crtc->dev;
106 struct drm_nouveau_private *dev_priv = dev->dev_private; 112 struct nouveau_drm *drm = nouveau_drm(dev);
113 struct nouveau_bios *bios = nouveau_bios(drm->device);
114 struct nouveau_clock *clk = nouveau_clock(drm->device);
107 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 115 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
108 struct nv04_mode_state *state = &dev_priv->mode_reg; 116 struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
109 struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index]; 117 struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
110 struct nouveau_pll_vals *pv = &regp->pllvals; 118 struct nouveau_pll_vals *pv = &regp->pllvals;
111 struct pll_lims pll_lim; 119 struct nvbios_pll pll_lim;
112 120
113 if (get_pll_limits(dev, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0, &pll_lim)) 121 if (nvbios_pll_parse(bios, nv_crtc->index ? PLL_VPLL1 : PLL_VPLL0,
122 &pll_lim))
114 return; 123 return;
115 124
116 /* NM2 == 0 is used to determine single stage mode on two stage plls */ 125 /* NM2 == 0 is used to determine single stage mode on two stage plls */
@@ -126,28 +135,29 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
126 * has yet been observed in allowing the use a single stage pll on all 135 * has yet been observed in allowing the use a single stage pll on all
127 * nv43 however. the behaviour of single stage use is untested on nv40 136 * nv43 however. the behaviour of single stage use is untested on nv40
128 */ 137 */
129 if (dev_priv->chipset > 0x40 && dot_clock <= (pll_lim.vco1.maxfreq / 2)) 138 if (nv_device(drm->device)->chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
130 memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2)); 139 memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
131 140
132 if (!nouveau_calc_pll_mnp(dev, &pll_lim, dot_clock, pv)) 141
142 if (!clk->pll_calc(clk, &pll_lim, dot_clock, pv))
133 return; 143 return;
134 144
135 state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK; 145 state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
136 146
137 /* The blob uses this always, so let's do the same */ 147 /* The blob uses this always, so let's do the same */
138 if (dev_priv->card_type == NV_40) 148 if (nv_device(drm->device)->card_type == NV_40)
139 state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE; 149 state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
140 /* again nv40 and some nv43 act more like nv3x as described above */ 150 /* again nv40 and some nv43 act more like nv3x as described above */
141 if (dev_priv->chipset < 0x41) 151 if (nv_device(drm->device)->chipset < 0x41)
142 state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL | 152 state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
143 NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL; 153 NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
144 state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK; 154 state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
145 155
146 if (pv->NM2) 156 if (pv->NM2)
147 NV_DEBUG_KMS(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n", 157 NV_DEBUG(drm, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
148 pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P); 158 pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
149 else 159 else
150 NV_DEBUG_KMS(dev, "vpll: n %d m %d log2p %d\n", 160 NV_DEBUG(drm, "vpll: n %d m %d log2p %d\n",
151 pv->N1, pv->M1, pv->log2P); 161 pv->N1, pv->M1, pv->log2P);
152 162
153 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); 163 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
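
A numeric reading of the single-stage decision above (the limits are hypothetical):

	/* if pll_lim.vco1.max_freq = 400000 kHz and dot_clock = 135000 kHz on
	 * chipset 0x43 (> 0x40): 135000 <= 400000 / 2, so vco2 is zeroed and
	 * the VPLL is programmed single-stage (pv->NM2 == 0). */
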
@@ -158,10 +168,11 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
158{ 168{
159 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 169 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
160 struct drm_device *dev = crtc->dev; 170 struct drm_device *dev = crtc->dev;
171 struct nouveau_drm *drm = nouveau_drm(dev);
161 unsigned char seq1 = 0, crtc17 = 0; 172 unsigned char seq1 = 0, crtc17 = 0;
162 unsigned char crtc1A; 173 unsigned char crtc1A;
163 174
164 NV_DEBUG_KMS(dev, "Setting dpms mode %d on CRTC %d\n", mode, 175 NV_DEBUG(drm, "Setting dpms mode %d on CRTC %d\n", mode,
165 nv_crtc->index); 176 nv_crtc->index);
166 177
167 if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */ 178 if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
@@ -225,9 +236,8 @@ static void
225nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode) 236nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
226{ 237{
227 struct drm_device *dev = crtc->dev; 238 struct drm_device *dev = crtc->dev;
228 struct drm_nouveau_private *dev_priv = dev->dev_private;
229 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 239 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
230 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 240 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
231 struct drm_framebuffer *fb = crtc->fb; 241 struct drm_framebuffer *fb = crtc->fb;
232 242
233 /* Calculate our timings */ 243 /* Calculate our timings */
@@ -251,8 +261,8 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
251 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 261 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
252 262
253 if (encoder->crtc == crtc && 263 if (encoder->crtc == crtc &&
254 (nv_encoder->dcb->type == OUTPUT_LVDS || 264 (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
255 nv_encoder->dcb->type == OUTPUT_TMDS)) 265 nv_encoder->dcb->type == DCB_OUTPUT_TMDS))
256 fp_output = true; 266 fp_output = true;
257 } 267 }
258 268
@@ -264,7 +274,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
264 horizEnd = horizTotal - 2; 274 horizEnd = horizTotal - 2;
265 horizBlankEnd = horizTotal + 4; 275 horizBlankEnd = horizTotal + 4;
266#if 0 276#if 0
267 if (dev->overlayAdaptor && dev_priv->card_type >= NV_10) 277 if (dev->overlayAdaptor && nv_device(drm->device)->card_type >= NV_10)
268 /* This reportedly works around some video overlay bandwidth problems */ 278 /* This reportedly works around some video overlay bandwidth problems */
269 horizTotal += 2; 279 horizTotal += 2;
270#endif 280#endif
@@ -452,10 +462,10 @@ static void
452nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) 462nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
453{ 463{
454 struct drm_device *dev = crtc->dev; 464 struct drm_device *dev = crtc->dev;
455 struct drm_nouveau_private *dev_priv = dev->dev_private; 465 struct nouveau_drm *drm = nouveau_drm(dev);
456 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 466 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
457 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 467 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
458 struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index]; 468 struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index];
459 struct drm_encoder *encoder; 469 struct drm_encoder *encoder;
460 bool lvds_output = false, tmds_output = false, tv_output = false, 470 bool lvds_output = false, tmds_output = false, tv_output = false,
461 off_chip_digital = false; 471 off_chip_digital = false;
@@ -467,11 +477,11 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
467 if (encoder->crtc != crtc) 477 if (encoder->crtc != crtc)
468 continue; 478 continue;
469 479
470 if (nv_encoder->dcb->type == OUTPUT_LVDS) 480 if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS)
471 digital = lvds_output = true; 481 digital = lvds_output = true;
472 if (nv_encoder->dcb->type == OUTPUT_TV) 482 if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
473 tv_output = true; 483 tv_output = true;
474 if (nv_encoder->dcb->type == OUTPUT_TMDS) 484 if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS)
475 digital = tmds_output = true; 485 digital = tmds_output = true;
476 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital) 486 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital)
477 off_chip_digital = true; 487 off_chip_digital = true;
@@ -500,7 +510,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
500 regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 | 510 regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
501 NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 | 511 NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
502 NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM; 512 NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
503 if (dev_priv->chipset >= 0x11) 513 if (nv_device(drm->device)->chipset >= 0x11)
504 regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32; 514 regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
505 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 515 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
506 regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE; 516 regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
@@ -533,7 +543,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
533 543
534 /* The blob seems to take the current value from crtc 0, add 4 to that 544 /* The blob seems to take the current value from crtc 0, add 4 to that
535 * and reuse the old value for crtc 1 */ 545 * and reuse the old value for crtc 1 */
536 regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = dev_priv->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY]; 546 regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = nv04_display(dev)->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY];
537 if (!nv_crtc->index) 547 if (!nv_crtc->index)
538 regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4; 548 regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4;
539 549
@@ -541,26 +551,26 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
541 * 1 << 30 on 0x60.830), for no apparent reason */ 551 * 1 << 30 on 0x60.830), for no apparent reason */
542 regp->CRTC[NV_CIO_CRE_59] = off_chip_digital; 552 regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
543 553
544 if (dev_priv->card_type >= NV_30) 554 if (nv_device(drm->device)->card_type >= NV_30)
545 regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1; 555 regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1;
546 556
547 regp->crtc_830 = mode->crtc_vdisplay - 3; 557 regp->crtc_830 = mode->crtc_vdisplay - 3;
548 regp->crtc_834 = mode->crtc_vdisplay - 1; 558 regp->crtc_834 = mode->crtc_vdisplay - 1;
549 559
550 if (dev_priv->card_type == NV_40) 560 if (nv_device(drm->device)->card_type == NV_40)
551 /* This is what the blob does */ 561 /* This is what the blob does */
552 regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850); 562 regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
553 563
554 if (dev_priv->card_type >= NV_30) 564 if (nv_device(drm->device)->card_type >= NV_30)
555 regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); 565 regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
556 566
557 if (dev_priv->card_type >= NV_10) 567 if (nv_device(drm->device)->card_type >= NV_10)
558 regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC; 568 regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
559 else 569 else
560 regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC; 570 regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
561 571
562 /* Some misc regs */ 572 /* Some misc regs */
563 if (dev_priv->card_type == NV_40) { 573 if (nv_device(drm->device)->card_type == NV_40) {
564 regp->CRTC[NV_CIO_CRE_85] = 0xFF; 574 regp->CRTC[NV_CIO_CRE_85] = 0xFF;
565 regp->CRTC[NV_CIO_CRE_86] = 0x1; 575 regp->CRTC[NV_CIO_CRE_86] = 0x1;
566 } 576 }
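Most of the remaining churn in nv_crtc_mode_set_regs() is a single substitution: dev_priv->card_type and dev_priv->chipset become nv_device(drm->device)->card_type and ->chipset, so chip identity is now read from the core device object rather than a per-drm_device private. A rough sketch of the lookup chain, with made-up members standing in for the real nouveau layout (nv_device() is modelled as a plain cast here; the real helper walks the object hierarchy):

/* Illustrative only -- the real structs live in nouveau_drm.h and
 * the core device headers, and carry far more state. */
struct nouveau_object;			/* opaque core object */

struct nouveau_device {
	int card_type;			/* NV_04 .. NV_40 and later */
	int chipset;			/* e.g. 0x11, 0x44 */
};

struct nouveau_drm {
	struct nouveau_object *device;
};

static inline struct nouveau_device *nv_device(struct nouveau_object *obj)
{
	return (struct nouveau_device *)obj;	/* sketch of the downcast */
}

The per-head display state (mode_reg, saved_reg, dac_users) takes the other route and moves behind nv04_display(dev), as the following hunks show.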
@@ -572,7 +582,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
572 582
573 /* Generic PRAMDAC regs */ 583 /* Generic PRAMDAC regs */
574 584
575 if (dev_priv->card_type >= NV_10) 585 if (nv_device(drm->device)->card_type >= NV_10)
576 /* Only bit that bios and blob set. */ 586 /* Only bit that bios and blob set. */
577 regp->nv10_cursync = (1 << 25); 587 regp->nv10_cursync = (1 << 25);
578 588
@@ -581,7 +591,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
581 NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON; 591 NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
582 if (crtc->fb->depth == 16) 592 if (crtc->fb->depth == 16)
583 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; 593 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
584 if (dev_priv->chipset >= 0x11) 594 if (nv_device(drm->device)->chipset >= 0x11)
585 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG; 595 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
586 596
587 regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */ 597 regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
@@ -611,9 +621,9 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
611{ 621{
612 struct drm_device *dev = crtc->dev; 622 struct drm_device *dev = crtc->dev;
613 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 623 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
614 struct drm_nouveau_private *dev_priv = dev->dev_private; 624 struct nouveau_drm *drm = nouveau_drm(dev);
615 625
616 NV_DEBUG_KMS(dev, "CRTC mode on CRTC %d:\n", nv_crtc->index); 626 NV_DEBUG(drm, "CRTC mode on CRTC %d:\n", nv_crtc->index);
617 drm_mode_debug_printmodeline(adjusted_mode); 627 drm_mode_debug_printmodeline(adjusted_mode);
618 628
619 /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ 629 /* unlock must come after turning off FP_TG_CONTROL in output_prepare */
@@ -621,8 +631,8 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
621 631
622 nv_crtc_mode_set_vga(crtc, adjusted_mode); 632 nv_crtc_mode_set_vga(crtc, adjusted_mode);
623 /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */ 633 /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
624 if (dev_priv->card_type == NV_40) 634 if (nv_device(drm->device)->card_type == NV_40)
625 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk); 635 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
626 nv_crtc_mode_set_regs(crtc, adjusted_mode); 636 nv_crtc_mode_set_regs(crtc, adjusted_mode);
627 nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock); 637 nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
628 return 0; 638 return 0;
@@ -631,10 +641,10 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
631static void nv_crtc_save(struct drm_crtc *crtc) 641static void nv_crtc_save(struct drm_crtc *crtc)
632{ 642{
633 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 643 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
634 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; 644 struct drm_device *dev = crtc->dev;
635 struct nv04_mode_state *state = &dev_priv->mode_reg; 645 struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
636 struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index]; 646 struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index];
637 struct nv04_mode_state *saved = &dev_priv->saved_reg; 647 struct nv04_mode_state *saved = &nv04_display(dev)->saved_reg;
638 struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index]; 648 struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index];
639 649
640 if (nv_two_heads(crtc->dev)) 650 if (nv_two_heads(crtc->dev))
@@ -652,14 +662,14 @@ static void nv_crtc_save(struct drm_crtc *crtc)
652static void nv_crtc_restore(struct drm_crtc *crtc) 662static void nv_crtc_restore(struct drm_crtc *crtc)
653{ 663{
654 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 664 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
655 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; 665 struct drm_device *dev = crtc->dev;
656 int head = nv_crtc->index; 666 int head = nv_crtc->index;
657 uint8_t saved_cr21 = dev_priv->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21]; 667 uint8_t saved_cr21 = nv04_display(dev)->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21];
658 668
659 if (nv_two_heads(crtc->dev)) 669 if (nv_two_heads(crtc->dev))
660 NVSetOwner(crtc->dev, head); 670 NVSetOwner(crtc->dev, head);
661 671
662 nouveau_hw_load_state(crtc->dev, head, &dev_priv->saved_reg); 672 nouveau_hw_load_state(crtc->dev, head, &nv04_display(dev)->saved_reg);
663 nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21); 673 nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21);
664 674
665 nv_crtc->last_dpms = NV_DPMS_CLEARED; 675 nv_crtc->last_dpms = NV_DPMS_CLEARED;
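nv_crtc_save() and nv_crtc_restore() keep two full register snapshots per head: mode_reg is the state the driver intends to program, saved_reg is what the hardware held when the driver took over, replayed at restore time. The rework relocates both from the drm private into the structure reached through nv04_display(dev). A compressed, self-contained model of the pattern (struct contents abbreviated; the real snapshot covers CRTC, RAMDAC, cursor and arbitration state):

#include <string.h>

struct crtc_snapshot {
	unsigned char CRTC[160];	/* VGA CRTC shadow, plus more */
};

struct display_state {
	struct crtc_snapshot mode_reg[2];	/* state to program */
	struct crtc_snapshot saved_reg[2];	/* state found at takeover */
};

static void hw_read_state(int head, struct crtc_snapshot *s)
{
	(void)head; (void)s;	/* MMIO readback elided in this sketch */
}

static void hw_write_state(int head, const struct crtc_snapshot *s)
{
	(void)head; (void)s;	/* MMIO writeback elided in this sketch */
}

static void crtc_save(struct display_state *d, int head)
{
	hw_read_state(head, &d->saved_reg[head]);
	/* seed the to-be-programmed copy from the current state */
	memcpy(&d->mode_reg[head], &d->saved_reg[head],
	       sizeof(d->mode_reg[head]));
}

static void crtc_restore(struct display_state *d, int head)
{
	hw_write_state(head, &d->saved_reg[head]);
}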
@@ -668,7 +678,7 @@ static void nv_crtc_restore(struct drm_crtc *crtc)
668static void nv_crtc_prepare(struct drm_crtc *crtc) 678static void nv_crtc_prepare(struct drm_crtc *crtc)
669{ 679{
670 struct drm_device *dev = crtc->dev; 680 struct drm_device *dev = crtc->dev;
671 struct drm_nouveau_private *dev_priv = dev->dev_private; 681 struct nouveau_drm *drm = nouveau_drm(dev);
672 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 682 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
673 struct drm_crtc_helper_funcs *funcs = crtc->helper_private; 683 struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
674 684
@@ -682,7 +692,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
682 692
683 /* Some more preparation. */ 693 /* Some more preparation. */
684 NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA); 694 NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
685 if (dev_priv->card_type == NV_40) { 695 if (nv_device(drm->device)->card_type == NV_40) {
686 uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900); 696 uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
687 NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000); 697 NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
688 } 698 }
@@ -692,10 +702,9 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
692{ 702{
693 struct drm_device *dev = crtc->dev; 703 struct drm_device *dev = crtc->dev;
694 struct drm_crtc_helper_funcs *funcs = crtc->helper_private; 704 struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
695 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
696 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 705 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
697 706
698 nouveau_hw_load_state(dev, nv_crtc->index, &dev_priv->mode_reg); 707 nouveau_hw_load_state(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
699 nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL); 708 nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL);
700 709
701#ifdef __BIG_ENDIAN 710#ifdef __BIG_ENDIAN
@@ -715,8 +724,6 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
715{ 724{
716 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 725 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
717 726
718 NV_DEBUG_KMS(crtc->dev, "\n");
719
720 if (!nv_crtc) 727 if (!nv_crtc)
721 return; 728 return;
722 729
@@ -732,18 +739,17 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
732{ 739{
733 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 740 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
734 struct drm_device *dev = nv_crtc->base.dev; 741 struct drm_device *dev = nv_crtc->base.dev;
735 struct drm_nouveau_private *dev_priv = dev->dev_private;
736 struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs; 742 struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs;
737 int i; 743 int i;
738 744
739 rgbs = (struct rgb *)dev_priv->mode_reg.crtc_reg[nv_crtc->index].DAC; 745 rgbs = (struct rgb *)nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].DAC;
740 for (i = 0; i < 256; i++) { 746 for (i = 0; i < 256; i++) {
741 rgbs[i].r = nv_crtc->lut.r[i] >> 8; 747 rgbs[i].r = nv_crtc->lut.r[i] >> 8;
742 rgbs[i].g = nv_crtc->lut.g[i] >> 8; 748 rgbs[i].g = nv_crtc->lut.g[i] >> 8;
743 rgbs[i].b = nv_crtc->lut.b[i] >> 8; 749 rgbs[i].b = nv_crtc->lut.b[i] >> 8;
744 } 750 }
745 751
746 nouveau_hw_load_state_palette(dev, nv_crtc->index, &dev_priv->mode_reg); 752 nouveau_hw_load_state_palette(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
747} 753}
748 754
749static void 755static void
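nv_crtc_gamma_load() overlays a packed rgb struct on the snapshot's DAC palette block and truncates DRM's 16-bit-per-channel gamma ramp to the 8 bits the RAMDAC stores. The conversion in isolation (the 768-byte buffer stands in for the crtc_reg[].DAC palette block):

#include <stdint.h>

struct rgb { uint8_t r, g, b; } __attribute__((packed));

/* Truncate a 256-entry, 16-bit-per-channel gamma ramp into the
 * 8-bit-per-channel palette layout the RAMDAC consumes. */
static void gamma_pack(const uint16_t *r, const uint16_t *g,
		       const uint16_t *b, uint8_t dac[256 * 3])
{
	struct rgb *rgbs = (struct rgb *)dac;
	int i;

	for (i = 0; i < 256; i++) {
		rgbs[i].r = r[i] >> 8;	/* keep the high byte */
		rgbs[i].g = g[i] >> 8;
		rgbs[i].b = b[i] >> 8;
	}
}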
@@ -779,18 +785,18 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
779{ 785{
780 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 786 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
781 struct drm_device *dev = crtc->dev; 787 struct drm_device *dev = crtc->dev;
782 struct drm_nouveau_private *dev_priv = dev->dev_private; 788 struct nouveau_drm *drm = nouveau_drm(dev);
783 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 789 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
784 struct drm_framebuffer *drm_fb; 790 struct drm_framebuffer *drm_fb;
785 struct nouveau_framebuffer *fb; 791 struct nouveau_framebuffer *fb;
786 int arb_burst, arb_lwm; 792 int arb_burst, arb_lwm;
787 int ret; 793 int ret;
788 794
789 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 795 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
790 796
791 /* no fb bound */ 797 /* no fb bound */
792 if (!atomic && !crtc->fb) { 798 if (!atomic && !crtc->fb) {
793 NV_DEBUG_KMS(dev, "No FB bound\n"); 799 NV_DEBUG(drm, "No FB bound\n");
794 return 0; 800 return 0;
795 } 801 }
796 802
@@ -858,7 +864,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
858 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX); 864 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
859 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX); 865 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
860 866
861 if (dev_priv->card_type >= NV_20) { 867 if (nv_device(drm->device)->card_type >= NV_20) {
862 regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8; 868 regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
863 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47); 869 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
864 } 870 }
@@ -878,8 +884,8 @@ nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
878 struct drm_framebuffer *fb, 884 struct drm_framebuffer *fb,
879 int x, int y, enum mode_set_atomic state) 885 int x, int y, enum mode_set_atomic state)
880{ 886{
881 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; 887 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
882 struct drm_device *dev = dev_priv->dev; 888 struct drm_device *dev = drm->dev;
883 889
884 if (state == ENTER_ATOMIC_MODE_SET) 890 if (state == ENTER_ATOMIC_MODE_SET)
885 nouveau_fbcon_save_disable_accel(dev); 891 nouveau_fbcon_save_disable_accel(dev);
@@ -934,9 +940,9 @@ static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
934 940
935#ifdef __BIG_ENDIAN 941#ifdef __BIG_ENDIAN
936 { 942 {
937 struct drm_nouveau_private *dev_priv = dev->dev_private; 943 struct nouveau_drm *drm = nouveau_drm(dev);
938 944
939 if (dev_priv->chipset == 0x11) { 945 if (nv_device(drm->device)->chipset == 0x11) {
940 pixel = ((pixel & 0x000000ff) << 24) | 946 pixel = ((pixel & 0x000000ff) << 24) |
941 ((pixel & 0x0000ff00) << 8) | 947 ((pixel & 0x0000ff00) << 8) |
942 ((pixel & 0x00ff0000) >> 8) | 948 ((pixel & 0x00ff0000) >> 8) |
@@ -953,8 +959,8 @@ static int
953nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, 959nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
954 uint32_t buffer_handle, uint32_t width, uint32_t height) 960 uint32_t buffer_handle, uint32_t width, uint32_t height)
955{ 961{
956 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; 962 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
957 struct drm_device *dev = dev_priv->dev; 963 struct drm_device *dev = drm->dev;
958 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 964 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
959 struct nouveau_bo *cursor = NULL; 965 struct nouveau_bo *cursor = NULL;
960 struct drm_gem_object *gem; 966 struct drm_gem_object *gem;
@@ -977,7 +983,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
977 if (ret) 983 if (ret)
978 goto out; 984 goto out;
979 985
980 if (dev_priv->chipset >= 0x11) 986 if (nv_device(drm->device)->chipset >= 0x11)
981 nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); 987 nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
982 else 988 else
983 nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); 989 nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
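In nv11_cursor_upload() (only partially shown above), the big-endian branch byte-swaps each 32-bit cursor pixel when the chipset is exactly 0x11; the change here only affects how the chipset is looked up. The shift cascade is an ordinary 32-bit byte swap:

#include <stdint.h>

/* Equivalent of the open-coded swap in nv11_cursor_upload(). */
static uint32_t swap32(uint32_t pixel)
{
	return ((pixel & 0x000000ff) << 24) |
	       ((pixel & 0x0000ff00) <<  8) |
	       ((pixel & 0x00ff0000) >>  8) |
	       ((pixel & 0xff000000) >> 24);
}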
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
index 6463870ef19e..fe86f0de348f 100644
--- a/drivers/gpu/drm/nouveau/nv04_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
@@ -1,6 +1,7 @@
1#include <drm/drmP.h> 1#include <drm/drmP.h>
2#include <drm/drm_mode.h>
3#include "nouveau_drm.h"
2#include "nouveau_reg.h" 4#include "nouveau_reg.h"
3#include "nouveau_drv.h"
4#include "nouveau_crtc.h" 5#include "nouveau_crtc.h"
5#include "nouveau_hw.h" 6#include "nouveau_hw.h"
6 7
@@ -37,8 +38,8 @@ static void
37nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) 38nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
38{ 39{
39 struct drm_device *dev = nv_crtc->base.dev; 40 struct drm_device *dev = nv_crtc->base.dev;
40 struct drm_nouveau_private *dev_priv = dev->dev_private; 41 struct nouveau_drm *drm = nouveau_drm(dev);
41 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 42 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
42 struct drm_crtc *crtc = &nv_crtc->base; 43 struct drm_crtc *crtc = &nv_crtc->base;
43 44
44 regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] = 45 regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] =
@@ -54,7 +55,7 @@ nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
54 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); 55 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
55 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); 56 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
56 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); 57 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
57 if (dev_priv->card_type == NV_40) 58 if (nv_device(drm->device)->card_type == NV_40)
58 nv_fix_nv40_hw_cursor(dev, nv_crtc->index); 59 nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
59} 60}
60 61
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 981e6d4f4c76..347a3bd78d04 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -27,22 +27,25 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h> 28#include <drm/drm_crtc_helper.h>
29 29
30#include "nouveau_drv.h" 30#include "nouveau_drm.h"
31#include "nouveau_encoder.h" 31#include "nouveau_encoder.h"
32#include "nouveau_connector.h" 32#include "nouveau_connector.h"
33#include "nouveau_crtc.h" 33#include "nouveau_crtc.h"
34#include "nouveau_hw.h" 34#include "nouveau_hw.h"
35#include "nouveau_gpio.h"
36#include "nvreg.h" 35#include "nvreg.h"
37 36
37#include <subdev/bios/gpio.h>
38#include <subdev/gpio.h>
39#include <subdev/timer.h>
40
38int nv04_dac_output_offset(struct drm_encoder *encoder) 41int nv04_dac_output_offset(struct drm_encoder *encoder)
39{ 42{
40 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 43 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
41 int offset = 0; 44 int offset = 0;
42 45
43 if (dcb->or & (8 | OUTPUT_C)) 46 if (dcb->or & (8 | DCB_OUTPUT_C))
44 offset += 0x68; 47 offset += 0x68;
45 if (dcb->or & (8 | OUTPUT_B)) 48 if (dcb->or & (8 | DCB_OUTPUT_B))
46 offset += 0x2000; 49 offset += 0x2000;
47 50
48 return offset; 51 return offset;
@@ -62,6 +65,8 @@ int nv04_dac_output_offset(struct drm_encoder *encoder)
62 65
63static int sample_load_twice(struct drm_device *dev, bool sense[2]) 66static int sample_load_twice(struct drm_device *dev, bool sense[2])
64{ 67{
68 struct nouveau_device *device = nouveau_dev(dev);
69 struct nouveau_timer *ptimer = nouveau_timer(device);
65 int i; 70 int i;
66 71
67 for (i = 0; i < 2; i++) { 72 for (i = 0; i < 2; i++) {
@@ -75,27 +80,30 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
75 * use a 10ms timeout (guards against crtc being inactive, in 80 * use a 10ms timeout (guards against crtc being inactive, in
76 * which case blank state would never change) 81 * which case blank state would never change)
77 */ 82 */
78 if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, 83 if (!nouveau_timer_wait_eq(ptimer, 10000000,
79 0x00000001, 0x00000000)) 84 NV_PRMCIO_INP0__COLOR,
85 0x00000001, 0x00000000))
80 return -EBUSY; 86 return -EBUSY;
81 if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, 87 if (!nouveau_timer_wait_eq(ptimer, 10000000,
82 0x00000001, 0x00000001)) 88 NV_PRMCIO_INP0__COLOR,
89 0x00000001, 0x00000001))
83 return -EBUSY; 90 return -EBUSY;
84 if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, 91 if (!nouveau_timer_wait_eq(ptimer, 10000000,
85 0x00000001, 0x00000000)) 92 NV_PRMCIO_INP0__COLOR,
93 0x00000001, 0x00000000))
86 return -EBUSY; 94 return -EBUSY;
87 95
88 udelay(100); 96 udelay(100);
89 /* when level triggers, sense is _LO_ */ 97 /* when level triggers, sense is _LO_ */
90 sense_a = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10; 98 sense_a = nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
91 99
92 /* take another reading until it agrees with sense_a... */ 100 /* take another reading until it agrees with sense_a... */
93 do { 101 do {
94 udelay(100); 102 udelay(100);
95 sense_b = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10; 103 sense_b = nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
96 if (sense_a != sense_b) { 104 if (sense_a != sense_b) {
97 sense_b_prime = 105 sense_b_prime =
98 nv_rd08(dev, NV_PRMCIO_INP0) & 0x10; 106 nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
99 if (sense_b == sense_b_prime) { 107 if (sense_b == sense_b_prime) {
100 /* ... unless two consecutive subsequent 108 /* ... unless two consecutive subsequent
101 * samples agree; sense_a is replaced */ 109 * samples agree; sense_a is replaced */
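sample_load_twice() now reaches its blank-state waits through the timer subdev (nouveau_timer_wait_eq() on ptimer, against the same register/mask/value triplets), but the sampling logic is untouched: synchronize to display-enable edges, then read the sense bit at 100 us intervals, accepting a value only once consecutive readings agree. The agreement loop lifted out of its MMIO context -- read_sense() and udelay() are stubbed so the sketch stands alone; in the driver they are nv_rd08(device, NV_PRMCIO_INP0) & 0x10 and the kernel delay helper:

#include <stdbool.h>

static bool read_sense(void) { return true; }		/* stub */
static void udelay(unsigned long usecs) { (void)usecs; }	/* stub */

/* One debounced sense sample: keep reading until two consecutive
 * readings agree; a pair of later readings that agree with each
 * other replaces a stale first sample. */
static bool sample_sense(void)
{
	bool a, b, b2;

	udelay(100);
	a = read_sense();

	do {
		udelay(100);
		b = read_sense();
		if (a != b) {
			b2 = read_sense();
			if (b == b2)
				a = b;	/* replace the stale sample */
		}
	} while (a != b);

	return a;
}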
@@ -120,6 +128,8 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
120 struct drm_connector *connector) 128 struct drm_connector *connector)
121{ 129{
122 struct drm_device *dev = encoder->dev; 130 struct drm_device *dev = encoder->dev;
131 struct nouveau_device *device = nouveau_dev(dev);
132 struct nouveau_drm *drm = nouveau_drm(dev);
123 uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; 133 uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
124 uint8_t saved_palette0[3], saved_palette_mask; 134 uint8_t saved_palette0[3], saved_palette_mask;
125 uint32_t saved_rtest_ctrl, saved_rgen_ctrl; 135 uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
@@ -154,11 +164,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
154 saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX); 164 saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
155 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0); 165 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);
156 166
157 nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS, 0x0); 167 nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
158 for (i = 0; i < 3; i++) 168 for (i = 0; i < 3; i++)
159 saved_palette0[i] = nv_rd08(dev, NV_PRMDIO_PALETTE_DATA); 169 saved_palette0[i] = nv_rd08(device, NV_PRMDIO_PALETTE_DATA);
160 saved_palette_mask = nv_rd08(dev, NV_PRMDIO_PIXEL_MASK); 170 saved_palette_mask = nv_rd08(device, NV_PRMDIO_PIXEL_MASK);
161 nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, 0); 171 nv_wr08(device, NV_PRMDIO_PIXEL_MASK, 0);
162 172
163 saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL); 173 saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
164 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, 174 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
@@ -171,11 +181,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
171 do { 181 do {
172 bool sense_pair[2]; 182 bool sense_pair[2];
173 183
174 nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); 184 nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
175 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0); 185 nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
176 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0); 186 nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
177 /* testing blue won't find monochrome monitors. I don't care */ 187 /* testing blue won't find monochrome monitors. I don't care */
178 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, blue); 188 nv_wr08(device, NV_PRMDIO_PALETTE_DATA, blue);
179 189
180 i = 0; 190 i = 0;
181 /* take sample pairs until both samples in the pair agree */ 191 /* take sample pairs until both samples in the pair agree */
@@ -198,11 +208,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
198 } while (++blue < 0x18 && sense); 208 } while (++blue < 0x18 && sense);
199 209
200out: 210out:
201 nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, saved_palette_mask); 211 nv_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
202 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl); 212 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
203 nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0); 213 nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
204 for (i = 0; i < 3; i++) 214 for (i = 0; i < 3; i++)
205 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]); 215 nv_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
206 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl); 216 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
207 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi); 217 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
208 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1); 218 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
@@ -210,7 +220,7 @@ out:
210 NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode); 220 NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
211 221
212 if (blue == 0x18) { 222 if (blue == 0x18) {
213 NV_INFO(dev, "Load detected on head A\n"); 223 NV_INFO(drm, "Load detected on head A\n");
214 return connector_status_connected; 224 return connector_status_connected;
215 } 225 }
216 226
@@ -220,43 +230,46 @@ out:
220uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) 230uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
221{ 231{
222 struct drm_device *dev = encoder->dev; 232 struct drm_device *dev = encoder->dev;
223 struct drm_nouveau_private *dev_priv = dev->dev_private; 233 struct nouveau_drm *drm = nouveau_drm(dev);
224 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 234 struct nouveau_device *device = nouveau_dev(dev);
235 struct nouveau_gpio *gpio = nouveau_gpio(device);
236 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
225 uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); 237 uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
226 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, 238 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
227 saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput; 239 saved_rtest_ctrl, saved_gpio0 = 0, saved_gpio1 = 0, temp, routput;
228 int head; 240 int head;
229 241
230#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) 242#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
231 if (dcb->type == OUTPUT_TV) { 243 if (dcb->type == DCB_OUTPUT_TV) {
232 testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0); 244 testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
233 245
234 if (dev_priv->vbios.tvdactestval) 246 if (drm->vbios.tvdactestval)
235 testval = dev_priv->vbios.tvdactestval; 247 testval = drm->vbios.tvdactestval;
236 } else { 248 } else {
237 testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */ 249 testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
238 250
239 if (dev_priv->vbios.dactestval) 251 if (drm->vbios.dactestval)
240 testval = dev_priv->vbios.dactestval; 252 testval = drm->vbios.dactestval;
241 } 253 }
242 254
243 saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); 255 saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
244 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 256 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
245 saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF); 257 saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
246 258
247 saved_powerctrl_2 = nvReadMC(dev, NV_PBUS_POWERCTRL_2); 259 saved_powerctrl_2 = nv_rd32(device, NV_PBUS_POWERCTRL_2);
248 260
249 nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff); 261 nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
250 if (regoffset == 0x68) { 262 if (regoffset == 0x68) {
251 saved_powerctrl_4 = nvReadMC(dev, NV_PBUS_POWERCTRL_4); 263 saved_powerctrl_4 = nv_rd32(device, NV_PBUS_POWERCTRL_4);
252 nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf); 264 nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
253 } 265 }
254 266
255 saved_gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1); 267 if (gpio) {
256 saved_gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0); 268 saved_gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
257 269 saved_gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
258 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV); 270 gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, dcb->type == DCB_OUTPUT_TV);
259 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV); 271 gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, dcb->type == DCB_OUTPUT_TV);
272 }
260 273
261 msleep(4); 274 msleep(4);
262 275
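The free-standing nouveau_gpio_func_get()/_set() helpers become methods on the gpio subdev object, and because the subdev may be absent the calls gain a NULL guard -- which is also why saved_gpio0/1 pick up = 0 initialisers earlier in this hunk. The call shape below is inferred from these call sites, and the 0xff line argument appears to mean "any line implementing this function"; treat both as assumptions, with the authoritative definitions in subdev/gpio.h:

#include <stdint.h>

/* Shape of the subdev object as used above; the real struct
 * carries much more state. */
struct nouveau_gpio {
	int (*get)(struct nouveau_gpio *, int idx, uint8_t tag,
		   uint8_t line);
	int (*set)(struct nouveau_gpio *, int idx, uint8_t tag,
		   uint8_t line, int state);
};

/* Guarded save-and-set, matching the pattern in
 * nv17_dac_sample_load(); feed the return value back into
 * ->set() to restore. */
static int gpio_push(struct nouveau_gpio *gpio, uint8_t tag, int state)
{
	int saved = 0;

	if (gpio) {
		saved = gpio->get(gpio, 0, tag, 0xff);
		gpio->set(gpio, 0, tag, 0xff, state);
	}
	return saved;
}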
@@ -270,8 +283,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
270 /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */ 283 /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
271 routput = (saved_routput & 0xfffffece) | head << 8; 284 routput = (saved_routput & 0xfffffece) | head << 8;
272 285
273 if (dev_priv->card_type >= NV_40) { 286 if (nv_device(drm->device)->card_type >= NV_40) {
274 if (dcb->type == OUTPUT_TV) 287 if (dcb->type == DCB_OUTPUT_TV)
275 routput |= 0x1a << 16; 288 routput |= 0x1a << 16;
276 else 289 else
277 routput &= ~(0x1a << 16); 290 routput &= ~(0x1a << 16);
@@ -303,11 +316,13 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
303 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput); 316 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
304 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl); 317 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
305 if (regoffset == 0x68) 318 if (regoffset == 0x68)
306 nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4); 319 nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
307 nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2); 320 nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
308 321
309 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, saved_gpio1); 322 if (gpio) {
310 nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, saved_gpio0); 323 gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
324 gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, saved_gpio0);
325 }
311 326
312 return sample; 327 return sample;
313} 328}
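RGB_TEST_DATA(), defined near the top of this function, packs three 10-bit channel values into a single test-pattern word, which is why the TV and VGA defaults differ: 0xa0 fits in 8 bits, while the VGA probe level 0x140 needs all 10. A standalone check of the packing (the 0x94050140 in the source comment is this value with what appears to be a not-blank flag in bit 31 OR'd in when the word is finally written to the test register -- an inference, since that write is not shown in these hunks):

#include <stdint.h>
#include <stdio.h>

#define RGB_TEST_DATA(r, g, b) ((r) << 0 | (g) << 10 | (b) << 20)

int main(void)
{
	uint32_t testval = RGB_TEST_DATA(0x140, 0x140, 0x140);

	printf("%#x\n", testval);	/* prints 0x14050140 */
	return 0;
}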
@@ -315,15 +330,15 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
315static enum drm_connector_status 330static enum drm_connector_status
316nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) 331nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
317{ 332{
318 struct drm_device *dev = encoder->dev; 333 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
319 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 334 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
320 335
321 if (nv04_dac_in_use(encoder)) 336 if (nv04_dac_in_use(encoder))
322 return connector_status_disconnected; 337 return connector_status_disconnected;
323 338
324 if (nv17_dac_sample_load(encoder) & 339 if (nv17_dac_sample_load(encoder) &
325 NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { 340 NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
326 NV_INFO(dev, "Load detected on output %c\n", 341 NV_INFO(drm, "Load detected on output %c\n",
327 '@' + ffs(dcb->or)); 342 '@' + ffs(dcb->or));
328 return connector_status_connected; 343 return connector_status_connected;
329 } else { 344 } else {
@@ -357,7 +372,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
357 struct drm_display_mode *adjusted_mode) 372 struct drm_display_mode *adjusted_mode)
358{ 373{
359 struct drm_device *dev = encoder->dev; 374 struct drm_device *dev = encoder->dev;
360 struct drm_nouveau_private *dev_priv = dev->dev_private; 375 struct nouveau_drm *drm = nouveau_drm(dev);
361 int head = nouveau_crtc(encoder->crtc)->index; 376 int head = nouveau_crtc(encoder->crtc)->index;
362 377
363 if (nv_gf4_disp_arch(dev)) { 378 if (nv_gf4_disp_arch(dev)) {
@@ -372,7 +387,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
372 /* force any other vga encoders to bind to the other crtc */ 387 /* force any other vga encoders to bind to the other crtc */
373 list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) { 388 list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) {
374 if (rebind == encoder 389 if (rebind == encoder
375 || nouveau_encoder(rebind)->dcb->type != OUTPUT_ANALOG) 390 || nouveau_encoder(rebind)->dcb->type != DCB_OUTPUT_ANALOG)
376 continue; 391 continue;
377 392
378 dac_offset = nv04_dac_output_offset(rebind); 393 dac_offset = nv04_dac_output_offset(rebind);
@@ -383,7 +398,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
383 } 398 }
384 399
385 /* This could use refinement for flatpanels, but it should work this way */ 400 /* This could use refinement for flatpanels, but it should work this way */
386 if (dev_priv->chipset < 0x44) 401 if (nv_device(drm->device)->chipset < 0x44)
387 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); 402 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
388 else 403 else
389 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); 404 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
@@ -392,13 +407,13 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
392static void nv04_dac_commit(struct drm_encoder *encoder) 407static void nv04_dac_commit(struct drm_encoder *encoder)
393{ 408{
394 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 409 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
395 struct drm_device *dev = encoder->dev; 410 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
396 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 411 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
397 struct drm_encoder_helper_funcs *helper = encoder->helper_private; 412 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
398 413
399 helper->dpms(encoder, DRM_MODE_DPMS_ON); 414 helper->dpms(encoder, DRM_MODE_DPMS_ON);
400 415
401 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", 416 NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
402 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), 417 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
403 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 418 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
404} 419}
@@ -406,11 +421,10 @@ static void nv04_dac_commit(struct drm_encoder *encoder)
406void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable) 421void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
407{ 422{
408 struct drm_device *dev = encoder->dev; 423 struct drm_device *dev = encoder->dev;
409 struct drm_nouveau_private *dev_priv = dev->dev_private; 424 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
410 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
411 425
412 if (nv_gf4_disp_arch(dev)) { 426 if (nv_gf4_disp_arch(dev)) {
413 uint32_t *dac_users = &dev_priv->dac_users[ffs(dcb->or) - 1]; 427 uint32_t *dac_users = &nv04_display(dev)->dac_users[ffs(dcb->or) - 1];
414 int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder); 428 int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder);
415 uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off); 429 uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off);
416 430
@@ -431,23 +445,23 @@ void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
431 * someone else. */ 445 * someone else. */
432bool nv04_dac_in_use(struct drm_encoder *encoder) 446bool nv04_dac_in_use(struct drm_encoder *encoder)
433{ 447{
434 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; 448 struct drm_device *dev = encoder->dev;
435 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 449 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
436 450
437 return nv_gf4_disp_arch(encoder->dev) && 451 return nv_gf4_disp_arch(encoder->dev) &&
438 (dev_priv->dac_users[ffs(dcb->or) - 1] & ~(1 << dcb->index)); 452 (nv04_display(dev)->dac_users[ffs(dcb->or) - 1] & ~(1 << dcb->index));
439} 453}
440 454
441static void nv04_dac_dpms(struct drm_encoder *encoder, int mode) 455static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
442{ 456{
443 struct drm_device *dev = encoder->dev;
444 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 457 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
458 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
445 459
446 if (nv_encoder->last_dpms == mode) 460 if (nv_encoder->last_dpms == mode)
447 return; 461 return;
448 nv_encoder->last_dpms = mode; 462 nv_encoder->last_dpms = mode;
449 463
450 NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n", 464 NV_INFO(drm, "Setting dpms mode %d on vga encoder (output %d)\n",
451 mode, nv_encoder->dcb->index); 465 mode, nv_encoder->dcb->index);
452 466
453 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); 467 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
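nv04_dac_in_use() answers "is this physical DAC also claimed by a DCB entry other than mine?" via one bitmask of DCB indices per output resource; the rework only moves the array into nv04_display(dev)->dac_users. The bookkeeping in isolation (NUM_DACS and the one-hot or mask are assumptions matching the indexing above):

#include <stdbool.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

#define NUM_DACS 3	/* assumption for the sketch */

static uint32_t dac_users[NUM_DACS];	/* bit n: DCB entry n uses it */

static void dac_claim(int dcb_or, int dcb_index, bool enable)
{
	uint32_t *users = &dac_users[ffs(dcb_or) - 1];

	if (enable)
		*users |= 1u << dcb_index;
	else
		*users &= ~(1u << dcb_index);
}

/* True if another DCB entry also owns this DAC. */
static bool dac_in_use(int dcb_or, int dcb_index)
{
	return dac_users[ffs(dcb_or) - 1] & ~(1u << dcb_index);
}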
@@ -479,8 +493,6 @@ static void nv04_dac_destroy(struct drm_encoder *encoder)
479{ 493{
480 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 494 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
481 495
482 NV_DEBUG_KMS(encoder->dev, "\n");
483
484 drm_encoder_cleanup(encoder); 496 drm_encoder_cleanup(encoder);
485 kfree(nv_encoder); 497 kfree(nv_encoder);
486} 498}
@@ -512,7 +524,7 @@ static const struct drm_encoder_funcs nv04_dac_funcs = {
512}; 524};
513 525
514int 526int
515nv04_dac_create(struct drm_connector *connector, struct dcb_entry *entry) 527nv04_dac_create(struct drm_connector *connector, struct dcb_output *entry)
516{ 528{
517 const struct drm_encoder_helper_funcs *helper; 529 const struct drm_encoder_helper_funcs *helper;
518 struct nouveau_encoder *nv_encoder = NULL; 530 struct nouveau_encoder *nv_encoder = NULL;
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 55ad2dd653fc..da55d7642c8c 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -27,7 +27,8 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h> 28#include <drm/drm_crtc_helper.h>
29 29
30#include "nouveau_drv.h" 30#include "nouveau_drm.h"
31#include "nouveau_reg.h"
31#include "nouveau_encoder.h" 32#include "nouveau_encoder.h"
32#include "nouveau_connector.h" 33#include "nouveau_connector.h"
33#include "nouveau_crtc.h" 34#include "nouveau_crtc.h"
@@ -36,6 +37,8 @@
36 37
37#include <drm/i2c/sil164.h> 38#include <drm/i2c/sil164.h>
38 39
40#include <subdev/i2c.h>
41
39#define FP_TG_CONTROL_ON (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | \ 42#define FP_TG_CONTROL_ON (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | \
40 NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | \ 43 NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | \
41 NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS) 44 NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS)
@@ -49,20 +52,20 @@ static inline bool is_fpc_off(uint32_t fpc)
49 FP_TG_CONTROL_OFF); 52 FP_TG_CONTROL_OFF);
50} 53}
51 54
52int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent) 55int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_output *dcbent)
53{ 56{
54 /* special case of nv_read_tmds to find crtc associated with an output. 57 /* special case of nv_read_tmds to find crtc associated with an output.
55 * this does not give a correct answer for off-chip dvi, but there's no 58 * this does not give a correct answer for off-chip dvi, but there's no
56 * use for such an answer anyway 59 * use for such an answer anyway
57 */ 60 */
58 int ramdac = (dcbent->or & OUTPUT_C) >> 2; 61 int ramdac = (dcbent->or & DCB_OUTPUT_C) >> 2;
59 62
60 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL, 63 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL,
61 NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4); 64 NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4);
62 return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac; 65 return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac;
63} 66}
64 67
65void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent, 68void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_output *dcbent,
66 int head, bool dl) 69 int head, bool dl)
67{ 70{
68 /* The BIOS scripts don't do this for us, sadly 71 /* The BIOS scripts don't do this for us, sadly
@@ -72,13 +75,13 @@ void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
72 * (for VT restore etc.) 75 * (for VT restore etc.)
73 */ 76 */
74 77
75 int ramdac = (dcbent->or & OUTPUT_C) >> 2; 78 int ramdac = (dcbent->or & DCB_OUTPUT_C) >> 2;
76 uint8_t tmds04 = 0x80; 79 uint8_t tmds04 = 0x80;
77 80
78 if (head != ramdac) 81 if (head != ramdac)
79 tmds04 = 0x88; 82 tmds04 = 0x88;
80 83
81 if (dcbent->type == OUTPUT_LVDS) 84 if (dcbent->type == DCB_OUTPUT_LVDS)
82 tmds04 |= 0x01; 85 tmds04 |= 0x01;
83 86
84 nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04); 87 nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04);
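Both helpers lean on dcb->or being a one-hot mask of output resources. The values are implied by the arithmetic rather than shown in this diff: with DCB_OUTPUT_A/B/C at bits 0/1/2, (or & DCB_OUTPUT_C) >> 2 yields RAMDAC index 1 for output C and 0 otherwise:

/* Bit values inferred from the uses above. */
#define DCB_OUTPUT_A 0x1
#define DCB_OUTPUT_B 0x2
#define DCB_OUTPUT_C 0x4

/* Which on-chip RAMDAC a digital output hangs off -- the same
 * expression used by nv04_dfp_get_bound_head() and
 * nv04_dfp_bind_head(). */
static int ramdac_index(int dcb_or)
{
	return (dcb_or & DCB_OUTPUT_C) >> 2;
}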
@@ -89,8 +92,7 @@ void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
89 92
90void nv04_dfp_disable(struct drm_device *dev, int head) 93void nv04_dfp_disable(struct drm_device *dev, int head)
91{ 94{
92 struct drm_nouveau_private *dev_priv = dev->dev_private; 95 struct nv04_crtc_reg *crtcstate = nv04_display(dev)->mode_reg.crtc_reg;
93 struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
94 96
95 if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) & 97 if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) &
96 FP_TG_CONTROL_ON) { 98 FP_TG_CONTROL_ON) {
@@ -111,14 +113,13 @@ void nv04_dfp_disable(struct drm_device *dev, int head)
111void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode) 113void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
112{ 114{
113 struct drm_device *dev = encoder->dev; 115 struct drm_device *dev = encoder->dev;
114 struct drm_nouveau_private *dev_priv = dev->dev_private;
115 struct drm_crtc *crtc; 116 struct drm_crtc *crtc;
116 struct nouveau_crtc *nv_crtc; 117 struct nouveau_crtc *nv_crtc;
117 uint32_t *fpc; 118 uint32_t *fpc;
118 119
119 if (mode == DRM_MODE_DPMS_ON) { 120 if (mode == DRM_MODE_DPMS_ON) {
120 nv_crtc = nouveau_crtc(encoder->crtc); 121 nv_crtc = nouveau_crtc(encoder->crtc);
121 fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control; 122 fpc = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].fp_control;
122 123
123 if (is_fpc_off(*fpc)) { 124 if (is_fpc_off(*fpc)) {
124 /* using saved value is ok, as (is_digital && dpms_on && 125 /* using saved value is ok, as (is_digital && dpms_on &&
@@ -133,7 +134,7 @@ void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
133 } else { 134 } else {
134 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 135 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
135 nv_crtc = nouveau_crtc(crtc); 136 nv_crtc = nouveau_crtc(crtc);
136 fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control; 137 fpc = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index].fp_control;
137 138
138 nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index); 139 nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index);
139 if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) { 140 if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) {
@@ -151,10 +152,10 @@ void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
151static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder) 152static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder)
152{ 153{
153 struct drm_device *dev = encoder->dev; 154 struct drm_device *dev = encoder->dev;
154 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 155 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
155 struct drm_encoder *slave; 156 struct drm_encoder *slave;
156 157
157 if (dcb->type != OUTPUT_TMDS || dcb->location == DCB_LOC_ON_CHIP) 158 if (dcb->type != DCB_OUTPUT_TMDS || dcb->location == DCB_LOC_ON_CHIP)
158 return NULL; 159 return NULL;
159 160
160 /* Some BIOSes (e.g. the one in a Quadro FX1000) report several 161 /* Some BIOSes (e.g. the one in a Quadro FX1000) report several
@@ -168,9 +169,9 @@ static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder)
168 * let's do the same. 169 * let's do the same.
169 */ 170 */
170 list_for_each_entry(slave, &dev->mode_config.encoder_list, head) { 171 list_for_each_entry(slave, &dev->mode_config.encoder_list, head) {
171 struct dcb_entry *slave_dcb = nouveau_encoder(slave)->dcb; 172 struct dcb_output *slave_dcb = nouveau_encoder(slave)->dcb;
172 173
173 if (slave_dcb->type == OUTPUT_TMDS && get_slave_funcs(slave) && 174 if (slave_dcb->type == DCB_OUTPUT_TMDS && get_slave_funcs(slave) &&
174 slave_dcb->tmdsconf.slave_addr == dcb->tmdsconf.slave_addr) 175 slave_dcb->tmdsconf.slave_addr == dcb->tmdsconf.slave_addr)
175 return slave; 176 return slave;
176 } 177 }
@@ -202,9 +203,8 @@ static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
202static void nv04_dfp_prepare_sel_clk(struct drm_device *dev, 203static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
203 struct nouveau_encoder *nv_encoder, int head) 204 struct nouveau_encoder *nv_encoder, int head)
204{ 205{
205 struct drm_nouveau_private *dev_priv = dev->dev_private; 206 struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
206 struct nv04_mode_state *state = &dev_priv->mode_reg; 207 uint32_t bits1618 = nv_encoder->dcb->or & DCB_OUTPUT_A ? 0x10000 : 0x40000;
207 uint32_t bits1618 = nv_encoder->dcb->or & OUTPUT_A ? 0x10000 : 0x40000;
208 208
209 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP) 209 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP)
210 return; 210 return;
@@ -233,8 +233,8 @@ static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
233 * and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table 233 * and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table
234 * entry has the necessary info) 234 * entry has the necessary info)
235 */ 235 */
236 if (nv_encoder->dcb->type == OUTPUT_LVDS && dev_priv->saved_reg.sel_clk & 0xf0) { 236 if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS && nv04_display(dev)->saved_reg.sel_clk & 0xf0) {
237 int shift = (dev_priv->saved_reg.sel_clk & 0x50) ? 0 : 1; 237 int shift = (nv04_display(dev)->saved_reg.sel_clk & 0x50) ? 0 : 1;
238 238
239 state->sel_clk &= ~0xf0; 239 state->sel_clk &= ~0xf0;
240 state->sel_clk |= (head ? 0x40 : 0x10) << shift; 240 state->sel_clk |= (head ? 0x40 : 0x10) << shift;
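The sel_clk adjustment preserves whichever bit-pair (0x10/0x40 or 0x20/0x80) the VBIOS-programmed value used for the LVDS clock source, and only flips the head selection inside that pair; the saved value now comes from nv04_display(dev)->saved_reg instead of the drm private. The bit manipulation as a pure function:

#include <stdint.h>

/* Re-point the LVDS pixel clock at 'head', keeping the bit-pair
 * (0x10/0x40 vs 0x20/0x80) that the saved VBIOS value used. */
static uint32_t sel_clk_for_head(uint32_t sel_clk, uint32_t saved, int head)
{
	if (saved & 0xf0) {
		int shift = (saved & 0x50) ? 0 : 1;

		sel_clk &= ~0xf0;
		sel_clk |= (head ? 0x40 : 0x10) << shift;
	}
	return sel_clk;
}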
@@ -246,9 +246,8 @@ static void nv04_dfp_prepare(struct drm_encoder *encoder)
246 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 246 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
247 struct drm_encoder_helper_funcs *helper = encoder->helper_private; 247 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
248 struct drm_device *dev = encoder->dev; 248 struct drm_device *dev = encoder->dev;
249 struct drm_nouveau_private *dev_priv = dev->dev_private;
250 int head = nouveau_crtc(encoder->crtc)->index; 249 int head = nouveau_crtc(encoder->crtc)->index;
251 struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg; 250 struct nv04_crtc_reg *crtcstate = nv04_display(dev)->mode_reg.crtc_reg;
252 uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX]; 251 uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX];
253 uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX]; 252 uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX];
254 253
@@ -263,7 +262,7 @@ static void nv04_dfp_prepare(struct drm_encoder *encoder)
263 *cr_lcd |= head ? 0x0 : 0x8; 262 *cr_lcd |= head ? 0x0 : 0x8;
264 else { 263 else {
265 *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30; 264 *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
266 if (nv_encoder->dcb->type == OUTPUT_LVDS) 265 if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS)
267 *cr_lcd |= 0x30; 266 *cr_lcd |= 0x30;
268 if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) { 267 if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
269 /* avoid being connected to both crtcs */ 268 /* avoid being connected to both crtcs */
@@ -282,17 +281,18 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
282 struct drm_display_mode *adjusted_mode) 281 struct drm_display_mode *adjusted_mode)
283{ 282{
284 struct drm_device *dev = encoder->dev; 283 struct drm_device *dev = encoder->dev;
285 struct drm_nouveau_private *dev_priv = dev->dev_private; 284 struct nouveau_device *device = nouveau_dev(dev);
285 struct nouveau_drm *drm = nouveau_drm(dev);
286 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 286 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
287 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index]; 287 struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
288 struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index]; 288 struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index];
289 struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc); 289 struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
290 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 290 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
291 struct drm_display_mode *output_mode = &nv_encoder->mode; 291 struct drm_display_mode *output_mode = &nv_encoder->mode;
292 struct drm_connector *connector = &nv_connector->base; 292 struct drm_connector *connector = &nv_connector->base;
293 uint32_t mode_ratio, panel_ratio; 293 uint32_t mode_ratio, panel_ratio;
294 294
295 NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index); 295 NV_DEBUG(drm, "Output mode on CRTC %d:\n", nv_crtc->index);
296 drm_mode_debug_printmodeline(output_mode); 296 drm_mode_debug_printmodeline(output_mode);
297 297
298 /* Initialize the FP registers in this CRTC. */ 298 /* Initialize the FP registers in this CRTC. */
@@ -300,10 +300,10 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
300 regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1; 300 regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
301 if (!nv_gf4_disp_arch(dev) || 301 if (!nv_gf4_disp_arch(dev) ||
302 (output_mode->hsync_start - output_mode->hdisplay) >= 302 (output_mode->hsync_start - output_mode->hdisplay) >=
303 dev_priv->vbios.digital_min_front_porch) 303 drm->vbios.digital_min_front_porch)
304 regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay; 304 regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
305 else 305 else
306 regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios.digital_min_front_porch - 1; 306 regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - drm->vbios.digital_min_front_porch - 1;
307 regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1; 307 regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
308 regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1; 308 regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
309 regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew; 309 regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
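The FP timing block stores inclusive end coordinates, hence the -1 on every sync value; FP_CRTC additionally backs the CRTC position off when the mode's horizontal front porch is shorter than the minimum the VBIOS demands (digital_min_front_porch, now read via drm->vbios). The horizontal half as a pure function over the mode fields visible in this hunk:

struct fp_horiz {
	int total, crtc, sync_start, sync_end, valid_start;
};

static struct fp_horiz fp_horiz_regs(int hdisplay, int htotal,
				     int hsync_start, int hsync_end,
				     int hskew, int min_front_porch,
				     int gf4_disp_arch)
{
	struct fp_horiz h;

	h.total = htotal - 1;
	if (!gf4_disp_arch ||
	    hsync_start - hdisplay >= min_front_porch)
		h.crtc = hdisplay;
	else
		h.crtc = hsync_start - min_front_porch - 1;
	h.sync_start = hsync_start - 1;
	h.sync_end = hsync_end - 1;
	h.valid_start = hskew;
	return h;
}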
@@ -335,12 +335,12 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
335 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE; 335 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
336 else /* gpu needs to scale */ 336 else /* gpu needs to scale */
337 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE; 337 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
338 if (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT) 338 if (nv_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
339 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12; 339 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
340 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && 340 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
341 output_mode->clock > 165000) 341 output_mode->clock > 165000)
342 regp->fp_control |= (2 << 24); 342 regp->fp_control |= (2 << 24);
343 if (nv_encoder->dcb->type == OUTPUT_LVDS) { 343 if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) {
344 bool duallink = false, dummy; 344 bool duallink = false, dummy;
345 if (nv_connector->edid && 345 if (nv_connector->edid &&
346 nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) { 346 nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
@@ -416,7 +416,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
416 if ((nv_connector->dithering_mode == DITHERING_MODE_ON) || 416 if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
417 (nv_connector->dithering_mode == DITHERING_MODE_AUTO && 417 (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
418 encoder->crtc->fb->depth > connector->display_info.bpc * 3)) { 418 encoder->crtc->fb->depth > connector->display_info.bpc * 3)) {
419 if (dev_priv->chipset == 0x11) 419 if (nv_device(drm->device)->chipset == 0x11)
420 regp->dither = savep->dither | 0x00010000; 420 regp->dither = savep->dither | 0x00010000;
421 else { 421 else {
422 int i; 422 int i;
@@ -427,7 +427,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
427 } 427 }
428 } 428 }
429 } else { 429 } else {
430 if (dev_priv->chipset != 0x11) { 430 if (nv_device(drm->device)->chipset != 0x11) {
431 /* reset them */ 431 /* reset them */
432 int i; 432 int i;
433 for (i = 0; i < 3; i++) { 433 for (i = 0; i < 3; i++) {
@@ -444,26 +444,26 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
444static void nv04_dfp_commit(struct drm_encoder *encoder) 444static void nv04_dfp_commit(struct drm_encoder *encoder)
445{ 445{
446 struct drm_device *dev = encoder->dev; 446 struct drm_device *dev = encoder->dev;
447 struct drm_nouveau_private *dev_priv = dev->dev_private; 447 struct nouveau_drm *drm = nouveau_drm(dev);
448 struct drm_encoder_helper_funcs *helper = encoder->helper_private; 448 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
449 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 449 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
450 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 450 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
451 struct dcb_entry *dcbe = nv_encoder->dcb; 451 struct dcb_output *dcbe = nv_encoder->dcb;
452 int head = nouveau_crtc(encoder->crtc)->index; 452 int head = nouveau_crtc(encoder->crtc)->index;
453 struct drm_encoder *slave_encoder; 453 struct drm_encoder *slave_encoder;
454 454
455 if (dcbe->type == OUTPUT_TMDS) 455 if (dcbe->type == DCB_OUTPUT_TMDS)
456 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); 456 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
457 else if (dcbe->type == OUTPUT_LVDS) 457 else if (dcbe->type == DCB_OUTPUT_LVDS)
458 call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock); 458 call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock);
459 459
460 /* update fp_control state for any changes made by scripts, 460 /* update fp_control state for any changes made by scripts,
461 * so correct value is written at DPMS on */ 461 * so correct value is written at DPMS on */
462 dev_priv->mode_reg.crtc_reg[head].fp_control = 462 nv04_display(dev)->mode_reg.crtc_reg[head].fp_control =
463 NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); 463 NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
464 464
465 /* This could use refinement for flatpanels, but it should work this way */ 465 /* This could use refinement for flatpanels, but it should work this way */
466 if (dev_priv->chipset < 0x44) 466 if (nv_device(drm->device)->chipset < 0x44)
467 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); 467 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
468 else 468 else
469 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); 469 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
@@ -476,7 +476,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
476 476
477 helper->dpms(encoder, DRM_MODE_DPMS_ON); 477 helper->dpms(encoder, DRM_MODE_DPMS_ON);
478 478
479 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", 479 NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
480 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), 480 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
481 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); 481 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
482} 482}
@@ -485,6 +485,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
485{ 485{
486#ifdef __powerpc__ 486#ifdef __powerpc__
487 struct drm_device *dev = encoder->dev; 487 struct drm_device *dev = encoder->dev;
488 struct nouveau_device *device = nouveau_dev(dev);
488 489
489 /* BIOS scripts usually take care of the backlight, thanks 490 /* BIOS scripts usually take care of the backlight, thanks
490 * Apple for your consistency. 491 * Apple for your consistency.
@@ -492,11 +493,11 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
492 if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 || 493 if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
493 dev->pci_device == 0x0329) { 494 dev->pci_device == 0x0329) {
494 if (mode == DRM_MODE_DPMS_ON) { 495 if (mode == DRM_MODE_DPMS_ON) {
495 nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31); 496 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
496 nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 1); 497 nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
497 } else { 498 } else {
498 nv_mask(dev, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0); 499 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 0);
499 nv_mask(dev, NV_PCRTC_GPIO_EXT, 3, 0); 500 nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 0);
500 } 501 }
501 } 502 }
502#endif 503#endif
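The powerpc backlight quirk now addresses the device object instead of the drm_device, but the nv_mask() it relies on keeps its read-modify-write meaning: clear the mask bits, OR in the new value, hand back what was there before. A sketch with stubbed MMIO accessors (the real helper operates on the core object's register window):

#include <stdint.h>

static uint32_t fake_reg;	/* single fake register for the sketch */

static uint32_t reg_rd32(uint32_t addr) { (void)addr; return fake_reg; }
static void reg_wr32(uint32_t addr, uint32_t data) { (void)addr; fake_reg = data; }

/* Read-modify-write, as nv_mask(device, addr, mask, value). */
static uint32_t nv_mask_sketch(uint32_t addr, uint32_t mask, uint32_t value)
{
	uint32_t old = reg_rd32(addr);

	reg_wr32(addr, (old & ~mask) | value);
	return old;
}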
@@ -511,7 +512,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
511{ 512{
512 struct drm_device *dev = encoder->dev; 513 struct drm_device *dev = encoder->dev;
513 struct drm_crtc *crtc = encoder->crtc; 514 struct drm_crtc *crtc = encoder->crtc;
514 struct drm_nouveau_private *dev_priv = dev->dev_private; 515 struct nouveau_drm *drm = nouveau_drm(dev);
515 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 516 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
516 bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms); 517 bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms);
517 518
@@ -519,7 +520,7 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
519 return; 520 return;
520 nv_encoder->last_dpms = mode; 521 nv_encoder->last_dpms = mode;
521 522
522 NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n", 523 NV_INFO(drm, "Setting dpms mode %d on lvds encoder (output %d)\n",
523 mode, nv_encoder->dcb->index); 524 mode, nv_encoder->dcb->index);
524 525
525 if (was_powersaving && is_powersaving_dpms(mode)) 526 if (was_powersaving && is_powersaving_dpms(mode))
@@ -549,22 +550,22 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
549 if (mode == DRM_MODE_DPMS_ON) 550 if (mode == DRM_MODE_DPMS_ON)
550 nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index); 551 nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index);
551 else { 552 else {
552 dev_priv->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); 553 nv04_display(dev)->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
553 dev_priv->mode_reg.sel_clk &= ~0xf0; 554 nv04_display(dev)->mode_reg.sel_clk &= ~0xf0;
554 } 555 }
555 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk); 556 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
556} 557}
557 558
558static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode) 559static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
559{ 560{
560 struct drm_device *dev = encoder->dev; 561 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
561 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 562 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
562 563
563 if (nv_encoder->last_dpms == mode) 564 if (nv_encoder->last_dpms == mode)
564 return; 565 return;
565 nv_encoder->last_dpms = mode; 566 nv_encoder->last_dpms = mode;
566 567
567 NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n", 568 NV_INFO(drm, "Setting dpms mode %d on tmds encoder (output %d)\n",
568 mode, nv_encoder->dcb->index); 569 mode, nv_encoder->dcb->index);
569 570
570 nv04_dfp_update_backlight(encoder, mode); 571 nv04_dfp_update_backlight(encoder, mode);
@@ -585,10 +586,9 @@ static void nv04_dfp_restore(struct drm_encoder *encoder)
585{ 586{
586 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 587 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
587 struct drm_device *dev = encoder->dev; 588 struct drm_device *dev = encoder->dev;
588 struct drm_nouveau_private *dev_priv = dev->dev_private;
589 int head = nv_encoder->restore.head; 589 int head = nv_encoder->restore.head;
590 590
591 if (nv_encoder->dcb->type == OUTPUT_LVDS) { 591 if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) {
592 struct nouveau_connector *connector = 592 struct nouveau_connector *connector =
593 nouveau_encoder_connector_get(nv_encoder); 593 nouveau_encoder_connector_get(nv_encoder);
594 594
@@ -597,9 +597,9 @@ static void nv04_dfp_restore(struct drm_encoder *encoder)
597 LVDS_PANEL_ON, 597 LVDS_PANEL_ON,
598 connector->native_mode->clock); 598 connector->native_mode->clock);
599 599
600 } else if (nv_encoder->dcb->type == OUTPUT_TMDS) { 600 } else if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS) {
601 int clock = nouveau_hw_pllvals_to_clk 601 int clock = nouveau_hw_pllvals_to_clk
602 (&dev_priv->saved_reg.crtc_reg[head].pllvals); 602 (&nv04_display(dev)->saved_reg.crtc_reg[head].pllvals);
603 603
604 run_tmds_table(dev, nv_encoder->dcb, head, clock); 604 run_tmds_table(dev, nv_encoder->dcb, head, clock);
605 } 605 }
@@ -611,8 +611,6 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder)
611{ 611{
612 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 612 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
613 613
614 NV_DEBUG_KMS(encoder->dev, "\n");
615
616 if (get_slave_funcs(encoder)) 614 if (get_slave_funcs(encoder))
617 get_slave_funcs(encoder)->destroy(encoder); 615 get_slave_funcs(encoder)->destroy(encoder);
618 616
@@ -623,8 +621,10 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder)
623static void nv04_tmds_slave_init(struct drm_encoder *encoder) 621static void nv04_tmds_slave_init(struct drm_encoder *encoder)
624{ 622{
625 struct drm_device *dev = encoder->dev; 623 struct drm_device *dev = encoder->dev;
626 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; 624 struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
627 struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, 2); 625 struct nouveau_drm *drm = nouveau_drm(dev);
626 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
627 struct nouveau_i2c_port *port = i2c->find(i2c, 2);
628 struct i2c_board_info info[] = { 628 struct i2c_board_info info[] = {
629 { 629 {
630 .type = "sil164", 630 .type = "sil164",
@@ -637,16 +637,16 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
637 }; 637 };
638 int type; 638 int type;
639 639
640 if (!nv_gf4_disp_arch(dev) || !i2c || 640 if (!nv_gf4_disp_arch(dev) || !port ||
641 get_tmds_slave(encoder)) 641 get_tmds_slave(encoder))
642 return; 642 return;
643 643
644 type = nouveau_i2c_identify(dev, "TMDS transmitter", info, NULL, 2); 644 type = i2c->identify(i2c, 2, "TMDS transmitter", info, NULL);
645 if (type < 0) 645 if (type < 0)
646 return; 646 return;
647 647
648 drm_i2c_encoder_init(dev, to_encoder_slave(encoder), 648 drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
649 &i2c->adapter, &info[type]); 649 &port->adapter, &info[type]);
650} 650}
651 651
652static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = { 652static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
@@ -676,7 +676,7 @@ static const struct drm_encoder_funcs nv04_dfp_funcs = {
676}; 676};
677 677
678int 678int
679nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry) 679nv04_dfp_create(struct drm_connector *connector, struct dcb_output *entry)
680{ 680{
681 const struct drm_encoder_helper_funcs *helper; 681 const struct drm_encoder_helper_funcs *helper;
682 struct nouveau_encoder *nv_encoder = NULL; 682 struct nouveau_encoder *nv_encoder = NULL;
@@ -684,11 +684,11 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry)
684 int type; 684 int type;
685 685
686 switch (entry->type) { 686 switch (entry->type) {
687 case OUTPUT_TMDS: 687 case DCB_OUTPUT_TMDS:
688 type = DRM_MODE_ENCODER_TMDS; 688 type = DRM_MODE_ENCODER_TMDS;
689 helper = &nv04_tmds_helper_funcs; 689 helper = &nv04_tmds_helper_funcs;
690 break; 690 break;
691 case OUTPUT_LVDS: 691 case DCB_OUTPUT_LVDS:
692 type = DRM_MODE_ENCODER_LVDS; 692 type = DRM_MODE_ENCODER_LVDS;
693 helper = &nv04_lvds_helper_funcs; 693 helper = &nv04_lvds_helper_funcs;
694 break; 694 break;
@@ -711,7 +711,7 @@ nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry)
711 encoder->possible_crtcs = entry->heads; 711 encoder->possible_crtcs = entry->heads;
712 encoder->possible_clones = 0; 712 encoder->possible_clones = 0;
713 713
714 if (entry->type == OUTPUT_TMDS && 714 if (entry->type == DCB_OUTPUT_TMDS &&
715 entry->location != DCB_LOC_ON_CHIP) 715 entry->location != DCB_LOC_ON_CHIP)
716 nv04_tmds_slave_init(encoder); 716 nv04_tmds_slave_init(encoder);
717 717
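The nv04_dfp.c hunks above show the two substitutions this series applies everywhere: DCB type enumerators gain a DCB_OUTPUT_ prefix, and fields that used to sit in the monolithic drm_nouveau_private are reached through narrow accessors such as nouveau_drm(dev) and nv04_display(dev). A minimal standalone sketch of the accessor pattern follows; the struct contents are simplified stand-ins for the real kernel types.

#include <stdlib.h>

struct nv04_mode_state { unsigned sel_clk; };          /* trimmed down */
struct nv04_display    { struct nv04_mode_state mode_reg; };
struct nouveau_display { void *priv; };                /* stand-in */

/* mirrors the nv04_display() helper introduced by this series */
static inline struct nv04_display *
nv04_display(struct nouveau_display *disp)
{
        return disp->priv;
}

int main(void)
{
        struct nouveau_display disp;

        disp.priv = calloc(1, sizeof(struct nv04_display));
        if (!disp.priv)
                return 1;
        /* call sites read exactly like the rewritten driver code */
        nv04_display(&disp)->mode_reg.sel_clk &= ~0xf0;
        free(disp.priv);
        return 0;
}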
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index ea1e47a34ddf..846050f04c23 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -25,78 +25,15 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include "nouveau_drv.h"
-#include "nouveau_fb.h"
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
 #include "nouveau_hw.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 
-static void nv04_vblank_crtc0_isr(struct drm_device *);
-static void nv04_vblank_crtc1_isr(struct drm_device *);
-
-static void
-nv04_display_store_initial_head_owner(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->chipset != 0x11) {
-		dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44);
-		return;
-	}
-
-	/* reading CR44 is broken on nv11, so we attempt to infer it */
-	if (nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28)) /* heads tied, restore both */
-		dev_priv->crtc_owner = 0x4;
-	else {
-		uint8_t slaved_on_A, slaved_on_B;
-		bool tvA = false;
-		bool tvB = false;
-
-		slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) &
-								0x80;
-		if (slaved_on_B)
-			tvB = !(NVReadVgaCrtc(dev, 1, NV_CIO_CRE_LCD__INDEX) &
-				MASK(NV_CIO_CRE_LCD_LCD_SELECT));
-
-		slaved_on_A = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX) &
-								0x80;
-		if (slaved_on_A)
-			tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) &
-				MASK(NV_CIO_CRE_LCD_LCD_SELECT));
-
-		if (slaved_on_A && !tvA)
-			dev_priv->crtc_owner = 0x0;
-		else if (slaved_on_B && !tvB)
-			dev_priv->crtc_owner = 0x3;
-		else if (slaved_on_A)
-			dev_priv->crtc_owner = 0x0;
-		else if (slaved_on_B)
-			dev_priv->crtc_owner = 0x3;
-		else
-			dev_priv->crtc_owner = 0x0;
-	}
-}
-
 int
 nv04_display_early_init(struct drm_device *dev)
 {
-	/* Make the I2C buses accessible. */
-	if (!nv_gf4_disp_arch(dev)) {
-		uint32_t pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
-
-		if (!(pmc_enable & 1))
-			nv_wr32(dev, NV03_PMC_ENABLE, pmc_enable | 1);
-	}
-
-	/* Unlock the VGA CRTCs. */
-	NVLockVgaCrtcs(dev, false);
-
-	/* Make sure the CRTCs aren't in slaved mode. */
-	if (nv_two_heads(dev)) {
-		nv04_display_store_initial_head_owner(dev);
-		NVSetOwner(dev, 0);
-	}
-
 	/* ensure vblank interrupts are off, they can't be enabled until
 	 * drm_vblank has been initialised
 	 */
@@ -110,25 +47,29 @@ nv04_display_early_init(struct drm_device *dev)
 void
 nv04_display_late_takedown(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (nv_two_heads(dev))
-		NVSetOwner(dev, dev_priv->crtc_owner);
-
-	NVLockVgaCrtcs(dev, true);
 }
 
 int
 nv04_display_create(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct dcb_table *dcb = &dev_priv->vbios.dcb;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct dcb_table *dcb = &drm->vbios.dcb;
 	struct drm_connector *connector, *ct;
 	struct drm_encoder *encoder;
 	struct drm_crtc *crtc;
+	struct nv04_display *disp;
 	int i, ret;
 
-	NV_DEBUG_KMS(dev, "\n");
+	NV_DEBUG(drm, "\n");
+
+	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+	if (!disp)
+		return -ENOMEM;
+
+	nouveau_display(dev)->priv = disp;
+	nouveau_display(dev)->dtor = nv04_display_destroy;
+	nouveau_display(dev)->init = nv04_display_init;
+	nouveau_display(dev)->fini = nv04_display_fini;
 
 	nouveau_hw_save_vga_fonts(dev, 1);
 
@@ -137,28 +78,28 @@ nv04_display_create(struct drm_device *dev)
 	nv04_crtc_create(dev, 1);
 
 	for (i = 0; i < dcb->entries; i++) {
-		struct dcb_entry *dcbent = &dcb->entry[i];
+		struct dcb_output *dcbent = &dcb->entry[i];
 
 		connector = nouveau_connector_create(dev, dcbent->connector);
 		if (IS_ERR(connector))
 			continue;
 
 		switch (dcbent->type) {
-		case OUTPUT_ANALOG:
+		case DCB_OUTPUT_ANALOG:
 			ret = nv04_dac_create(connector, dcbent);
 			break;
-		case OUTPUT_LVDS:
-		case OUTPUT_TMDS:
+		case DCB_OUTPUT_LVDS:
+		case DCB_OUTPUT_TMDS:
 			ret = nv04_dfp_create(connector, dcbent);
 			break;
-		case OUTPUT_TV:
+		case DCB_OUTPUT_TV:
 			if (dcbent->location == DCB_LOC_ON_CHIP)
 				ret = nv17_tv_create(connector, dcbent);
 			else
 				ret = nv04_tv_create(connector, dcbent);
 			break;
 		default:
-			NV_WARN(dev, "DCB type %d not known\n", dcbent->type);
+			NV_WARN(drm, "DCB type %d not known\n", dcbent->type);
 			continue;
 		}
 
@@ -169,7 +110,7 @@ nv04_display_create(struct drm_device *dev)
 	list_for_each_entry_safe(connector, ct,
 				 &dev->mode_config.connector_list, head) {
 		if (!connector->encoder_ids[0]) {
-			NV_WARN(dev, "%s has no encoders, removing\n",
+			NV_WARN(drm, "%s has no encoders, removing\n",
 				drm_get_connector_name(connector));
 			connector->funcs->destroy(connector);
 		}
@@ -185,21 +126,18 @@ nv04_display_create(struct drm_device *dev)
 			func->save(encoder);
 	}
 
-	nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
-	nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
 	return 0;
 }
 
 void
 nv04_display_destroy(struct drm_device *dev)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nv04_display *disp = nv04_display(dev);
 	struct drm_encoder *encoder;
 	struct drm_crtc *crtc;
 
-	NV_DEBUG_KMS(dev, "\n");
-
-	nouveau_irq_unregister(dev, 24);
-	nouveau_irq_unregister(dev, 25);
+	NV_DEBUG(drm, "\n");
 
 	/* Turn every CRTC off. */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -221,6 +159,9 @@ nv04_display_destroy(struct drm_device *dev)
 	crtc->funcs->restore(crtc);
 
 	nouveau_hw_save_vga_fonts(dev, 0);
+
+	nouveau_display(dev)->priv = NULL;
+	kfree(disp);
 }
 
 int
@@ -257,17 +198,3 @@ nv04_display_fini(struct drm_device *dev)
 	if (nv_two_heads(dev))
 		NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
 }
-
-static void
-nv04_vblank_crtc0_isr(struct drm_device *dev)
-{
-	nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
-	drm_handle_vblank(dev, 0);
-}
-
-static void
-nv04_vblank_crtc1_isr(struct drm_device *dev)
-{
-	nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
-	drm_handle_vblank(dev, 1);
-}
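The net effect of the nv04_display.c rewrite: the per-board display state becomes a kzalloc'd nv04_display hung off the common display struct, torn down through a registered dtor, while the CRTC vblank ISRs that used to be registered here move into common code. A small standalone model of that create/register/destroy flow, with deliberately simplified stand-in types:

#include <stdlib.h>

struct display {                        /* stand-in for nouveau_display */
        void *priv;
        void (*dtor)(struct display *);
        int  (*init)(struct display *);
        void (*fini)(struct display *);
};

struct nv04_priv { int heads; };

static void nv04_dtor(struct display *disp)
{
        free(disp->priv);
        disp->priv = NULL;
}

static int  nv04_init(struct display *disp) { (void)disp; return 0; }
static void nv04_fini(struct display *disp) { (void)disp; }

static int nv04_create(struct display *disp)
{
        struct nv04_priv *priv = calloc(1, sizeof(*priv));
        if (!priv)
                return -1;              /* -ENOMEM in the kernel */
        disp->priv = priv;
        disp->dtor = nv04_dtor;         /* the core invokes these blindly */
        disp->init = nv04_init;
        disp->fini = nv04_fini;
        return 0;
}

int main(void)
{
        struct display disp = { 0 };
        if (nv04_create(&disp))
                return 1;
        disp.init(&disp);
        disp.fini(&disp);
        disp.dtor(&disp);               /* mirrors nv04_display_destroy() */
        return 0;
}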
diff --git a/drivers/gpu/drm/nouveau/nv04_display.h b/drivers/gpu/drm/nouveau/nv04_display.h
new file mode 100644
index 000000000000..45322802e37d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_display.h
@@ -0,0 +1,184 @@
1#ifndef __NV04_DISPLAY_H__
2#define __NV04_DISPLAY_H__
3
4#include <subdev/bios/pll.h>
5
6#include "nouveau_display.h"
7
8enum nv04_fp_display_regs {
9 FP_DISPLAY_END,
10 FP_TOTAL,
11 FP_CRTC,
12 FP_SYNC_START,
13 FP_SYNC_END,
14 FP_VALID_START,
15 FP_VALID_END
16};
17
18struct nv04_crtc_reg {
19 unsigned char MiscOutReg;
20 uint8_t CRTC[0xa0];
21 uint8_t CR58[0x10];
22 uint8_t Sequencer[5];
23 uint8_t Graphics[9];
24 uint8_t Attribute[21];
25 unsigned char DAC[768];
26
27 /* PCRTC regs */
28 uint32_t fb_start;
29 uint32_t crtc_cfg;
30 uint32_t cursor_cfg;
31 uint32_t gpio_ext;
32 uint32_t crtc_830;
33 uint32_t crtc_834;
34 uint32_t crtc_850;
35 uint32_t crtc_eng_ctrl;
36
37 /* PRAMDAC regs */
38 uint32_t nv10_cursync;
39 struct nouveau_pll_vals pllvals;
40 uint32_t ramdac_gen_ctrl;
41 uint32_t ramdac_630;
42 uint32_t ramdac_634;
43 uint32_t tv_setup;
44 uint32_t tv_vtotal;
45 uint32_t tv_vskew;
46 uint32_t tv_vsync_delay;
47 uint32_t tv_htotal;
48 uint32_t tv_hskew;
49 uint32_t tv_hsync_delay;
50 uint32_t tv_hsync_delay2;
51 uint32_t fp_horiz_regs[7];
52 uint32_t fp_vert_regs[7];
53 uint32_t dither;
54 uint32_t fp_control;
55 uint32_t dither_regs[6];
56 uint32_t fp_debug_0;
57 uint32_t fp_debug_1;
58 uint32_t fp_debug_2;
59 uint32_t fp_margin_color;
60 uint32_t ramdac_8c0;
61 uint32_t ramdac_a20;
62 uint32_t ramdac_a24;
63 uint32_t ramdac_a34;
64 uint32_t ctv_regs[38];
65};
66
67struct nv04_output_reg {
68 uint32_t output;
69 int head;
70};
71
72struct nv04_mode_state {
73 struct nv04_crtc_reg crtc_reg[2];
74 uint32_t pllsel;
75 uint32_t sel_clk;
76};
77
78struct nv04_display {
79 struct nv04_mode_state mode_reg;
80 struct nv04_mode_state saved_reg;
81 uint32_t saved_vga_font[4][16384];
82 uint32_t dac_users[4];
83};
84
85static inline struct nv04_display *
86nv04_display(struct drm_device *dev)
87{
88 return nouveau_display(dev)->priv;
89}
90
91/* nv04_display.c */
92int nv04_display_early_init(struct drm_device *);
93void nv04_display_late_takedown(struct drm_device *);
94int nv04_display_create(struct drm_device *);
95void nv04_display_destroy(struct drm_device *);
96int nv04_display_init(struct drm_device *);
97void nv04_display_fini(struct drm_device *);
98
99/* nv04_crtc.c */
100int nv04_crtc_create(struct drm_device *, int index);
101
102/* nv04_dac.c */
103int nv04_dac_create(struct drm_connector *, struct dcb_output *);
104uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
105int nv04_dac_output_offset(struct drm_encoder *encoder);
106void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
107bool nv04_dac_in_use(struct drm_encoder *encoder);
108
109/* nv04_dfp.c */
110int nv04_dfp_create(struct drm_connector *, struct dcb_output *);
111int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_output *dcbent);
112void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_output *dcbent,
113 int head, bool dl);
114void nv04_dfp_disable(struct drm_device *dev, int head);
115void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
116
117/* nv04_tv.c */
118int nv04_tv_identify(struct drm_device *dev, int i2c_index);
119int nv04_tv_create(struct drm_connector *, struct dcb_output *);
120
121/* nv17_tv.c */
122int nv17_tv_create(struct drm_connector *, struct dcb_output *);
123
124static inline bool
125nv_two_heads(struct drm_device *dev)
126{
127 struct nouveau_drm *drm = nouveau_drm(dev);
128 const int impl = dev->pci_device & 0x0ff0;
129
130 if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
131 impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
132 return true;
133
134 return false;
135}
136
137static inline bool
138nv_gf4_disp_arch(struct drm_device *dev)
139{
140 return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
141}
142
143static inline bool
144nv_two_reg_pll(struct drm_device *dev)
145{
146 struct nouveau_drm *drm = nouveau_drm(dev);
147 const int impl = dev->pci_device & 0x0ff0;
148
149 if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
150 return true;
151 return false;
152}
153
154static inline bool
155nv_match_device(struct drm_device *dev, unsigned device,
156 unsigned sub_vendor, unsigned sub_device)
157{
158 return dev->pdev->device == device &&
159 dev->pdev->subsystem_vendor == sub_vendor &&
160 dev->pdev->subsystem_device == sub_device;
161}
162
163#include <subdev/bios.h>
164#include <subdev/bios/init.h>
165
166static inline void
167nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
168 struct dcb_output *outp, int crtc)
169{
170 struct nouveau_device *device = nouveau_dev(dev);
171 struct nouveau_bios *bios = nouveau_bios(device);
172 struct nvbios_init init = {
173 .subdev = nv_subdev(bios),
174 .bios = bios,
175 .offset = table,
176 .outp = outp,
177 .crtc = crtc,
178 .execute = 1,
179 };
180
181 nvbios_exec(&init);
182}
183
184#endif
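The new header also carries the chipset predicates inline; they all key off the low implementation bits of the PCI device id, with nv_gf4_disp_arch() further excluding impl 0x0110 (NV11). A standalone copy of the nv_two_heads() test, using the same mask and implementation ids as the header above:

#include <stdbool.h>
#include <stdio.h>

static bool two_heads(unsigned pci_device, bool nv10_or_later)
{
        const unsigned impl = pci_device & 0x0ff0;

        /* NV10+ parts have two CRTCs, except these early singles */
        return nv10_or_later && impl != 0x0100 && impl != 0x0150 &&
               impl != 0x01a0 && impl != 0x0200;
}

int main(void)
{
        printf("NV11 0x0110: %d\n", two_heads(0x0110, true));   /* 1 */
        printf("NV10 0x0100: %d\n", two_heads(0x0100, true));   /* 0 */
        return 0;
}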
diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c
deleted file mode 100644
index 375f5533c313..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_fb.c
+++ /dev/null
@@ -1,54 +0,0 @@
1#include <drm/drmP.h>
2#include "nouveau_drv.h"
3#include <drm/nouveau_drm.h>
4
5int
6nv04_fb_vram_init(struct drm_device *dev)
7{
8 struct drm_nouveau_private *dev_priv = dev->dev_private;
9 u32 boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
10
11 if (boot0 & 0x00000100) {
12 dev_priv->vram_size = ((boot0 >> 12) & 0xf) * 2 + 2;
13 dev_priv->vram_size *= 1024 * 1024;
14 } else {
15 switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
16 case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
17 dev_priv->vram_size = 32 * 1024 * 1024;
18 break;
19 case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
20 dev_priv->vram_size = 16 * 1024 * 1024;
21 break;
22 case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
23 dev_priv->vram_size = 8 * 1024 * 1024;
24 break;
25 case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
26 dev_priv->vram_size = 4 * 1024 * 1024;
27 break;
28 }
29 }
30
31 if ((boot0 & 0x00000038) <= 0x10)
32 dev_priv->vram_type = NV_MEM_TYPE_SGRAM;
33 else
34 dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
35
36 return 0;
37}
38
39int
40nv04_fb_init(struct drm_device *dev)
41{
42 /* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
43 * nvidia reading PFB_CFG_0, then writing back its original value.
44 * (which was 0x701114 in this case)
45 */
46
47 nv_wr32(dev, NV04_PFB_CFG0, 0x1114);
48 return 0;
49}
50
51void
52nv04_fb_takedown(struct drm_device *dev)
53{
54}
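nv04_fb.c is deleted as well; its PFB_BOOT_0 VRAM-size decode moved into the core subdev code. A standalone sketch of the same decode follows: the 2 MiB-step extended encoding is taken from the deleted function, while the numeric values of the RAM_AMOUNT field are assumptions standing in for the NV04_PFB_BOOT_0_RAM_AMOUNT_* constants.

#include <stdint.h>
#include <stdio.h>

#define BOOT_0_RAM_AMOUNT       0x00000003      /* assumed field mask */
#define BOOT_0_RAM_AMOUNT_32MB  0x00000000      /* assumed value */
#define BOOT_0_RAM_AMOUNT_8MB   0x00000002      /* assumed value */
#define BOOT_0_RAM_AMOUNT_16MB  0x00000003      /* assumed value */

static uint64_t nv04_vram_bytes(uint32_t boot0)
{
        const uint64_t mib = 1024 * 1024;

        if (boot0 & 0x00000100)         /* newer encoding, 2 MiB steps */
                return (((boot0 >> 12) & 0xf) * 2 + 2) * mib;

        switch (boot0 & BOOT_0_RAM_AMOUNT) {
        case BOOT_0_RAM_AMOUNT_32MB: return 32 * mib;
        case BOOT_0_RAM_AMOUNT_16MB: return 16 * mib;
        case BOOT_0_RAM_AMOUNT_8MB:  return  8 * mib;
        default:                     return  4 * mib;
        }
}

int main(void)
{
        /* extended encoding: (7 * 2 + 2) MiB = 16 MiB */
        printf("%llu MiB\n", (unsigned long long)
               (nv04_vram_bytes(0x00000100 | (7 << 12)) >> 20));
        return 0;
}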
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index fc53a3922bce..77dcc9c50777 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -22,19 +22,18 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include <core/object.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
 
 int
 nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	ret = RING_SPACE(chan, 4);
@@ -53,9 +52,8 @@ int
 nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	ret = RING_SPACE(chan, 7);
@@ -81,9 +79,8 @@ int
 nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	uint32_t fg;
 	uint32_t bg;
 	uint32_t dsize;
@@ -142,9 +139,10 @@ nv04_fbcon_accel_init(struct fb_info *info)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
-	const int sub = NvSubCtxSurf2D;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_channel *chan = drm->channel;
+	struct nouveau_device *device = nv_device(drm->device);
+	struct nouveau_object *object;
 	int surface_fmt, pattern_fmt, rect_fmt;
 	int ret;
 
@@ -176,31 +174,35 @@ nv04_fbcon_accel_init(struct fb_info *info)
 		return -EINVAL;
 	}
 
-	ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D,
-				    dev_priv->card_type >= NV_10 ?
-				    0x0062 : 0x0042);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvCtxSurf2D,
+				 device->card_type >= NV_10 ? 0x0062 : 0x0042,
+				 NULL, 0, &object);
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvClipRect,
+				 0x0019, NULL, 0, &object);
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvRop,
				 0x0043, NULL, 0, &object);
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImagePatt,
+				 0x0044, NULL, 0, &object);
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvGdiRect,
+				 0x004a, NULL, 0, &object);
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_gr_new(chan, NvImageBlit,
-				    dev_priv->chipset >= 0x11 ?
-				    0x009f : 0x005f);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImageBlit,
+				 device->chipset >= 0x11 ? 0x009f : 0x005f,
+				 NULL, 0, &object);
 	if (ret)
 		return ret;
 
@@ -209,25 +211,25 @@ nv04_fbcon_accel_init(struct fb_info *info)
 		return 0;
 	}
 
-	BEGIN_NV04(chan, sub, 0x0000, 1);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
 	OUT_RING(chan, NvCtxSurf2D);
-	BEGIN_NV04(chan, sub, 0x0184, 2);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2);
 	OUT_RING(chan, NvDmaFB);
 	OUT_RING(chan, NvDmaFB);
-	BEGIN_NV04(chan, sub, 0x0300, 4);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4);
 	OUT_RING(chan, surface_fmt);
 	OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
 	OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
 	OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
 
-	BEGIN_NV04(chan, sub, 0x0000, 1);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
 	OUT_RING(chan, NvRop);
-	BEGIN_NV04(chan, sub, 0x0300, 1);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1);
 	OUT_RING(chan, 0x55);
 
-	BEGIN_NV04(chan, sub, 0x0000, 1);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
 	OUT_RING(chan, NvImagePatt);
-	BEGIN_NV04(chan, sub, 0x0300, 8);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8);
 	OUT_RING(chan, pattern_fmt);
 #ifdef __BIG_ENDIAN
 	OUT_RING(chan, 2);
@@ -241,9 +243,9 @@ nv04_fbcon_accel_init(struct fb_info *info)
 	OUT_RING(chan, ~0);
 	OUT_RING(chan, ~0);
 
-	BEGIN_NV04(chan, sub, 0x0000, 1);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
 	OUT_RING(chan, NvClipRect);
-	BEGIN_NV04(chan, sub, 0x0300, 2);
+	BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
 
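The accel_init hunks swap the old three-argument nouveau_gpuobj_gr_new(chan, handle, class) for the generic nouveau_object_new(client, parent, handle, class, data, size, &object) constructor from the new core object model. A toy model of the new calling convention follows; the handles and the struct are illustrative stand-ins, and only the class numbers (0x009f on nv11 and later, 0x005f before) come from the diff:

#include <stdint.h>
#include <stdio.h>

struct nv_object { uint32_t handle; uint16_t oclass; };

/* same argument shape as the new constructor: parent handle, object
 * handle, class, optional creation data, and a result pointer */
static int object_new(uint32_t parent, uint32_t handle, uint16_t oclass,
                      const void *data, size_t size, struct nv_object *obj)
{
        (void)data; (void)size;
        obj->handle = handle;
        obj->oclass = oclass;
        printf("class %04x bound under %08x\n", oclass, parent);
        return 0;
}

int main(void)
{
        struct nv_object obj;
        int chipset = 0x11;

        return object_new(0x1000 /* NVDRM_CHAN stand-in */,
                          0x2000 /* NvImageBlit stand-in */,
                          chipset >= 0x11 ? 0x009f : 0x005f,
                          NULL, 0, &obj);
}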
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index aa6859270662..a220b94ba9f2 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -22,15 +22,14 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include <engine/fifo.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include "nouveau_ramht.h"
 #include "nouveau_fence.h"
 
 struct nv04_fence_chan {
 	struct nouveau_fence_chan base;
-	atomic_t sequence;
 };
 
 struct nv04_fence_priv {
@@ -57,84 +56,56 @@ nv04_fence_sync(struct nouveau_fence *fence,
 	return -ENODEV;
 }
 
-int
-nv04_fence_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-	struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
-	atomic_set(&fctx->sequence, data);
-	return 0;
-}
-
 static u32
 nv04_fence_read(struct nouveau_channel *chan)
 {
-	struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
-	return atomic_read(&fctx->sequence);
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	return atomic_read(&fifo->refcnt);
 }
 
 static void
-nv04_fence_context_del(struct nouveau_channel *chan, int engine)
+nv04_fence_context_del(struct nouveau_channel *chan)
 {
-	struct nv04_fence_chan *fctx = chan->engctx[engine];
+	struct nv04_fence_chan *fctx = chan->fence;
 	nouveau_fence_context_del(&fctx->base);
-	chan->engctx[engine] = NULL;
+	chan->fence = NULL;
 	kfree(fctx);
 }
 
 static int
-nv04_fence_context_new(struct nouveau_channel *chan, int engine)
+nv04_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
 	if (fctx) {
 		nouveau_fence_context_new(&fctx->base);
-		atomic_set(&fctx->sequence, 0);
-		chan->engctx[engine] = fctx;
+		chan->fence = fctx;
 		return 0;
 	}
 	return -ENOMEM;
 }
 
-static int
-nv04_fence_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	return 0;
-}
-
-static int
-nv04_fence_init(struct drm_device *dev, int engine)
-{
-	return 0;
-}
-
 static void
-nv04_fence_destroy(struct drm_device *dev, int engine)
+nv04_fence_destroy(struct nouveau_drm *drm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_fence_priv *priv = nv_engine(dev, engine);
-
-	dev_priv->eng[engine] = NULL;
+	struct nv04_fence_priv *priv = drm->fence;
+	drm->fence = NULL;
 	kfree(priv);
 }
 
 int
-nv04_fence_create(struct drm_device *dev)
+nv04_fence_create(struct nouveau_drm *drm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv04_fence_priv *priv;
-	int ret;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	priv->base.engine.destroy = nv04_fence_destroy;
-	priv->base.engine.init = nv04_fence_init;
-	priv->base.engine.fini = nv04_fence_fini;
-	priv->base.engine.context_new = nv04_fence_context_new;
-	priv->base.engine.context_del = nv04_fence_context_del;
+	priv->base.dtor = nv04_fence_destroy;
+	priv->base.context_new = nv04_fence_context_new;
+	priv->base.context_del = nv04_fence_context_del;
 	priv->base.emit = nv04_fence_emit;
 	priv->base.sync = nv04_fence_sync;
 	priv->base.read = nv04_fence_read;
-	dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
-	return ret;
+	return 0;
 }
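Two things fall out of the nv04_fence.c hunk: the shadow atomic sequence counter disappears because the sequence can now be read straight from the fifo channel's refcnt, and the per-engine engctx[] slot becomes a direct chan->fence pointer (this also fixes the old nv04_fence_create() returning an uninitialised ret). A standalone sketch of the new context pairing, using stand-in types:

#include <stdlib.h>

struct fence_chan { int placeholder; };
struct channel    { struct fence_chan *fence; };

static int context_new(struct channel *chan)
{
        struct fence_chan *fctx = calloc(1, sizeof(*fctx));
        if (!fctx)
                return -1;              /* -ENOMEM in the kernel */
        chan->fence = fctx;             /* was chan->engctx[engine] */
        return 0;
}

static void context_del(struct channel *chan)
{
        free(chan->fence);
        chan->fence = NULL;
}

int main(void)
{
        struct channel chan = { 0 };
        if (context_new(&chan))
                return 1;
        context_del(&chan);
        return 0;
}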
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
deleted file mode 100644
index 65f966deeee6..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ /dev/null
@@ -1,505 +0,0 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include "nouveau_drv.h"
29#include "nouveau_fifo.h"
30#include "nouveau_util.h"
31#include "nouveau_ramht.h"
32#include "nouveau_software.h"
33
34static struct ramfc_desc {
35 unsigned bits:6;
36 unsigned ctxs:5;
37 unsigned ctxp:8;
38 unsigned regs:5;
39 unsigned regp;
40} nv04_ramfc[] = {
41 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
42 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
43 { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
44 { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
45 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
46 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
47 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
48 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
49 {}
50};
51
52struct nv04_fifo_priv {
53 struct nouveau_fifo_priv base;
54 struct ramfc_desc *ramfc_desc;
55};
56
57struct nv04_fifo_chan {
58 struct nouveau_fifo_chan base;
59 struct nouveau_gpuobj *ramfc;
60};
61
62bool
63nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
64{
65 int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
66
67 if (!enable) {
68 /* In some cases the PFIFO puller may be left in an
69 * inconsistent state if you try to stop it when it's
70 * busy translating handles. Sometimes you get a
71 * PFIFO_CACHE_ERROR, sometimes it just fails silently
72 * sending incorrect instance offsets to PGRAPH after
73 * it's started up again. To avoid the latter we
74 * invalidate the most recently calculated instance.
75 */
76 if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
77 NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
78 NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
79
80 if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
81 NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
82 nv_wr32(dev, NV03_PFIFO_INTR_0,
83 NV_PFIFO_INTR_CACHE_ERROR);
84
85 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
86 }
87
88 return pull & 1;
89}
90
91static int
92nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
93{
94 struct drm_device *dev = chan->dev;
95 struct drm_nouveau_private *dev_priv = dev->dev_private;
96 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
97 struct nv04_fifo_chan *fctx;
98 unsigned long flags;
99 int ret;
100
101 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
102 if (!fctx)
103 return -ENOMEM;
104
105 /* map channel control registers */
106 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
107 NV03_USER(chan->id), PAGE_SIZE);
108 if (!chan->user) {
109 ret = -ENOMEM;
110 goto error;
111 }
112
113 /* initialise default fifo context */
114 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
115 chan->id * 32, ~0, 32,
116 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
117 if (ret)
118 goto error;
119
120 nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
121 nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
122 nv_wo32(fctx->ramfc, 0x08, chan->pushbuf->pinst >> 4);
123 nv_wo32(fctx->ramfc, 0x0c, 0x00000000);
124 nv_wo32(fctx->ramfc, 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
125 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
126#ifdef __BIG_ENDIAN
127 NV_PFIFO_CACHE1_BIG_ENDIAN |
128#endif
129 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
130 nv_wo32(fctx->ramfc, 0x14, 0x00000000);
131 nv_wo32(fctx->ramfc, 0x18, 0x00000000);
132 nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
133
134 /* enable dma mode on the channel */
135 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
136 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
137 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
138
139error:
140 if (ret)
141 priv->base.base.context_del(chan, engine);
142 return ret;
143}
144
145void
146nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
147{
148 struct drm_device *dev = chan->dev;
149 struct drm_nouveau_private *dev_priv = dev->dev_private;
150 struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
151 struct nv04_fifo_chan *fctx = chan->engctx[engine];
152 struct ramfc_desc *c = priv->ramfc_desc;
153 unsigned long flags;
154 int chid;
155
156 /* prevent fifo context switches */
157 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
158 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
159
160 /* if this channel is active, replace it with a null context */
161 chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
162 if (chid == chan->id) {
163 nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
164 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
165 nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
166
167 do {
168 u32 mask = ((1ULL << c->bits) - 1) << c->regs;
169 nv_mask(dev, c->regp, mask, 0x00000000);
170 } while ((++c)->bits);
171
172 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
173 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
174 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
175 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
176 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
177 }
178
179 /* restore normal operation, after disabling dma mode */
180 nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
181 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
182 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
183
184 /* clean up */
185 nouveau_gpuobj_ref(NULL, &fctx->ramfc);
186 nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
187 if (chan->user) {
188 iounmap(chan->user);
189 chan->user = NULL;
190 }
191}
192
193int
194nv04_fifo_init(struct drm_device *dev, int engine)
195{
196 struct drm_nouveau_private *dev_priv = dev->dev_private;
197 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
198 int i;
199
200 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
201 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
202
203 nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
204 nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
205
206 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
207 ((dev_priv->ramht->bits - 9) << 16) |
208 (dev_priv->ramht->gpuobj->pinst >> 8));
209 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
210 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
211
212 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
213
214 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
215 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
216
217 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
218 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
219 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
220
221 for (i = 0; i < priv->base.channels; i++) {
222 if (dev_priv->channels.ptr[i])
223 nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
224 }
225
226 return 0;
227}
228
229int
230nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
231{
232 struct drm_nouveau_private *dev_priv = dev->dev_private;
233 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
234 struct nouveau_channel *chan;
235 int chid;
236
237 /* prevent context switches and halt fifo operation */
238 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
239 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
240 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
241 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);
242
243 /* store current fifo context in ramfc */
244 chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
245 chan = dev_priv->channels.ptr[chid];
246 if (suspend && chid != priv->base.channels && chan) {
247 struct nv04_fifo_chan *fctx = chan->engctx[engine];
248 struct nouveau_gpuobj *ctx = fctx->ramfc;
249 struct ramfc_desc *c = priv->ramfc_desc;
250 do {
251 u32 rm = ((1ULL << c->bits) - 1) << c->regs;
252 u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
253 u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
254 u32 cv = (nv_ro32(ctx, c->ctxp) & ~cm);
255 nv_wo32(ctx, c->ctxp, cv | (rv << c->ctxs));
256 } while ((++c)->bits);
257 }
258
259 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
260 return 0;
261}
262
263static bool
264nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
265{
266 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
267 struct drm_nouveau_private *dev_priv = dev->dev_private;
268 struct nouveau_channel *chan = NULL;
269 struct nouveau_gpuobj *obj;
270 unsigned long flags;
271 const int subc = (addr >> 13) & 0x7;
272 const int mthd = addr & 0x1ffc;
273 bool handled = false;
274 u32 engine;
275
276 spin_lock_irqsave(&dev_priv->channels.lock, flags);
277 if (likely(chid >= 0 && chid < pfifo->channels))
278 chan = dev_priv->channels.ptr[chid];
279 if (unlikely(!chan))
280 goto out;
281
282 switch (mthd) {
283 case 0x0000: /* bind object to subchannel */
284 obj = nouveau_ramht_find(chan, data);
285 if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
286 break;
287
288 engine = 0x0000000f << (subc * 4);
289
290 nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
291 handled = true;
292 break;
293 default:
294 engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
295 if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
296 break;
297
298 if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
299 mthd, data))
300 handled = true;
301 break;
302 }
303
304out:
305 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
306 return handled;
307}
308
309static const char *nv_dma_state_err(u32 state)
310{
311 static const char * const desc[] = {
312 "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
313 "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
314 };
315 return desc[(state >> 29) & 0x7];
316}
317
318void
319nv04_fifo_isr(struct drm_device *dev)
320{
321 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
322 struct drm_nouveau_private *dev_priv = dev->dev_private;
323 uint32_t status, reassign;
324 int cnt = 0;
325
326 reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
327 while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
328 uint32_t chid, get;
329
330 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
331
332 chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
333 get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
334
335 if (status & NV_PFIFO_INTR_CACHE_ERROR) {
336 uint32_t mthd, data;
337 int ptr;
338
339 /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
340 * wrapping on my G80 chips, but CACHE1 isn't big
341 * enough for this much data.. Tests show that it
342 * wraps around to the start at GET=0x800.. No clue
343 * as to why..
344 */
345 ptr = (get & 0x7ff) >> 2;
346
347 if (dev_priv->card_type < NV_40) {
348 mthd = nv_rd32(dev,
349 NV04_PFIFO_CACHE1_METHOD(ptr));
350 data = nv_rd32(dev,
351 NV04_PFIFO_CACHE1_DATA(ptr));
352 } else {
353 mthd = nv_rd32(dev,
354 NV40_PFIFO_CACHE1_METHOD(ptr));
355 data = nv_rd32(dev,
356 NV40_PFIFO_CACHE1_DATA(ptr));
357 }
358
359 if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
360 NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
361 "Mthd 0x%04x Data 0x%08x\n",
362 chid, (mthd >> 13) & 7, mthd & 0x1ffc,
363 data);
364 }
365
366 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
367 nv_wr32(dev, NV03_PFIFO_INTR_0,
368 NV_PFIFO_INTR_CACHE_ERROR);
369
370 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
371 nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
372 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
373 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
374 nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
375 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
376
377 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
378 nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
379 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
380
381 status &= ~NV_PFIFO_INTR_CACHE_ERROR;
382 }
383
384 if (status & NV_PFIFO_INTR_DMA_PUSHER) {
385 u32 dma_get = nv_rd32(dev, 0x003244);
386 u32 dma_put = nv_rd32(dev, 0x003240);
387 u32 push = nv_rd32(dev, 0x003220);
388 u32 state = nv_rd32(dev, 0x003228);
389
390 if (dev_priv->card_type == NV_50) {
391 u32 ho_get = nv_rd32(dev, 0x003328);
392 u32 ho_put = nv_rd32(dev, 0x003320);
393 u32 ib_get = nv_rd32(dev, 0x003334);
394 u32 ib_put = nv_rd32(dev, 0x003330);
395
396 if (nouveau_ratelimit())
397 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
398 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
399 "State 0x%08x (err: %s) Push 0x%08x\n",
400 chid, ho_get, dma_get, ho_put,
401 dma_put, ib_get, ib_put, state,
402 nv_dma_state_err(state),
403 push);
404
405 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
406 nv_wr32(dev, 0x003364, 0x00000000);
407 if (dma_get != dma_put || ho_get != ho_put) {
408 nv_wr32(dev, 0x003244, dma_put);
409 nv_wr32(dev, 0x003328, ho_put);
410 } else
411 if (ib_get != ib_put) {
412 nv_wr32(dev, 0x003334, ib_put);
413 }
414 } else {
415 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
416 "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
417 chid, dma_get, dma_put, state,
418 nv_dma_state_err(state), push);
419
420 if (dma_get != dma_put)
421 nv_wr32(dev, 0x003244, dma_put);
422 }
423
424 nv_wr32(dev, 0x003228, 0x00000000);
425 nv_wr32(dev, 0x003220, 0x00000001);
426 nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
427 status &= ~NV_PFIFO_INTR_DMA_PUSHER;
428 }
429
430 if (status & NV_PFIFO_INTR_SEMAPHORE) {
431 uint32_t sem;
432
433 status &= ~NV_PFIFO_INTR_SEMAPHORE;
434 nv_wr32(dev, NV03_PFIFO_INTR_0,
435 NV_PFIFO_INTR_SEMAPHORE);
436
437 sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
438 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
439
440 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
441 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
442 }
443
444 if (dev_priv->card_type == NV_50) {
445 if (status & 0x00000010) {
446 nv50_fb_vm_trap(dev, nouveau_ratelimit());
447 status &= ~0x00000010;
448 nv_wr32(dev, 0x002100, 0x00000010);
449 }
450 }
451
452 if (status) {
453 if (nouveau_ratelimit())
454 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
455 status, chid);
456 nv_wr32(dev, NV03_PFIFO_INTR_0, status);
457 status = 0;
458 }
459
460 nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
461 }
462
463 if (status) {
464 NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
465 nv_wr32(dev, 0x2140, 0);
466 nv_wr32(dev, 0x140, 0);
467 }
468
469 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
470}
471
472void
473nv04_fifo_destroy(struct drm_device *dev, int engine)
474{
475 struct drm_nouveau_private *dev_priv = dev->dev_private;
476 struct nv04_fifo_priv *priv = nv_engine(dev, engine);
477
478 nouveau_irq_unregister(dev, 8);
479
480 dev_priv->eng[engine] = NULL;
481 kfree(priv);
482}
483
484int
485nv04_fifo_create(struct drm_device *dev)
486{
487 struct drm_nouveau_private *dev_priv = dev->dev_private;
488 struct nv04_fifo_priv *priv;
489
490 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
491 if (!priv)
492 return -ENOMEM;
493
494 priv->base.base.destroy = nv04_fifo_destroy;
495 priv->base.base.init = nv04_fifo_init;
496 priv->base.base.fini = nv04_fifo_fini;
497 priv->base.base.context_new = nv04_fifo_context_new;
498 priv->base.base.context_del = nv04_fifo_context_del;
499 priv->base.channels = 15;
500 priv->ramfc_desc = nv04_ramfc;
501 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
502
503 nouveau_irq_register(dev, 8, nv04_fifo_isr);
504 return 0;
505}
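The deleted nv04_fifo.c (this logic now lives under the new core engine tree) drove context save and restore from the ramfc_desc[] table, splicing each PFIFO register bitfield into its RAMFC context word. A standalone rendering of that pack loop; the register offsets and the fake read-back are illustrative only, while the mask arithmetic matches the loop in the deleted nv04_fifo_fini():

#include <stdint.h>
#include <stdio.h>

struct ramfc_desc {
        unsigned bits:6;        /* field width */
        unsigned ctxs:5;        /* shift within the context word */
        unsigned ctxp:8;        /* byte offset of the context word */
        unsigned regs:5;        /* shift within the register */
        unsigned regp;          /* register offset */
};

static const struct ramfc_desc desc[] = {
        { 32,  0, 0x00, 0, 0x3240 },    /* offsets are placeholders */
        { 16,  0, 0x08, 0, 0x3224 },
        { 16, 16, 0x08, 0, 0x3224 },
        {}                              /* bits == 0 terminates the walk */
};

static uint32_t fake_rd32(unsigned reg) { return reg ^ 0xa5a5a5a5; }

static void ramfc_save(uint32_t *ctx)
{
        const struct ramfc_desc *c = desc;
        do {
                uint32_t rm = ((1ULL << c->bits) - 1) << c->regs;
                uint32_t cm = ((1ULL << c->bits) - 1) << c->ctxs;
                uint32_t rv = (fake_rd32(c->regp) & rm) >> c->regs;
                ctx[c->ctxp / 4] = (ctx[c->ctxp / 4] & ~cm) | (rv << c->ctxs);
        } while ((++c)->bits);
}

int main(void)
{
        uint32_t ctx[8] = { 0 };
        ramfc_save(ctx);
        printf("ctx[0]=%08x ctx[2]=%08x\n", ctx[0], ctx[2]);
        return 0;
}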
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
deleted file mode 100644
index 68cce6023461..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ /dev/null
@@ -1,1325 +0,0 @@
1/*
2 * Copyright 2007 Stephane Marchesin
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <drm/drmP.h>
26#include <drm/nouveau_drm.h>
27#include "nouveau_drv.h"
28#include "nouveau_hw.h"
29#include "nouveau_util.h"
30#include "nouveau_ramht.h"
31
32struct nv04_graph_engine {
33 struct nouveau_exec_engine base;
34};
35
36static uint32_t nv04_graph_ctx_regs[] = {
37 0x0040053c,
38 0x00400544,
39 0x00400540,
40 0x00400548,
41 NV04_PGRAPH_CTX_SWITCH1,
42 NV04_PGRAPH_CTX_SWITCH2,
43 NV04_PGRAPH_CTX_SWITCH3,
44 NV04_PGRAPH_CTX_SWITCH4,
45 NV04_PGRAPH_CTX_CACHE1,
46 NV04_PGRAPH_CTX_CACHE2,
47 NV04_PGRAPH_CTX_CACHE3,
48 NV04_PGRAPH_CTX_CACHE4,
49 0x00400184,
50 0x004001a4,
51 0x004001c4,
52 0x004001e4,
53 0x00400188,
54 0x004001a8,
55 0x004001c8,
56 0x004001e8,
57 0x0040018c,
58 0x004001ac,
59 0x004001cc,
60 0x004001ec,
61 0x00400190,
62 0x004001b0,
63 0x004001d0,
64 0x004001f0,
65 0x00400194,
66 0x004001b4,
67 0x004001d4,
68 0x004001f4,
69 0x00400198,
70 0x004001b8,
71 0x004001d8,
72 0x004001f8,
73 0x0040019c,
74 0x004001bc,
75 0x004001dc,
76 0x004001fc,
77 0x00400174,
78 NV04_PGRAPH_DMA_START_0,
79 NV04_PGRAPH_DMA_START_1,
80 NV04_PGRAPH_DMA_LENGTH,
81 NV04_PGRAPH_DMA_MISC,
82 NV04_PGRAPH_DMA_PITCH,
83 NV04_PGRAPH_BOFFSET0,
84 NV04_PGRAPH_BBASE0,
85 NV04_PGRAPH_BLIMIT0,
86 NV04_PGRAPH_BOFFSET1,
87 NV04_PGRAPH_BBASE1,
88 NV04_PGRAPH_BLIMIT1,
89 NV04_PGRAPH_BOFFSET2,
90 NV04_PGRAPH_BBASE2,
91 NV04_PGRAPH_BLIMIT2,
92 NV04_PGRAPH_BOFFSET3,
93 NV04_PGRAPH_BBASE3,
94 NV04_PGRAPH_BLIMIT3,
95 NV04_PGRAPH_BOFFSET4,
96 NV04_PGRAPH_BBASE4,
97 NV04_PGRAPH_BLIMIT4,
98 NV04_PGRAPH_BOFFSET5,
99 NV04_PGRAPH_BBASE5,
100 NV04_PGRAPH_BLIMIT5,
101 NV04_PGRAPH_BPITCH0,
102 NV04_PGRAPH_BPITCH1,
103 NV04_PGRAPH_BPITCH2,
104 NV04_PGRAPH_BPITCH3,
105 NV04_PGRAPH_BPITCH4,
106 NV04_PGRAPH_SURFACE,
107 NV04_PGRAPH_STATE,
108 NV04_PGRAPH_BSWIZZLE2,
109 NV04_PGRAPH_BSWIZZLE5,
110 NV04_PGRAPH_BPIXEL,
111 NV04_PGRAPH_NOTIFY,
112 NV04_PGRAPH_PATT_COLOR0,
113 NV04_PGRAPH_PATT_COLOR1,
114 NV04_PGRAPH_PATT_COLORRAM+0x00,
115 NV04_PGRAPH_PATT_COLORRAM+0x04,
116 NV04_PGRAPH_PATT_COLORRAM+0x08,
117 NV04_PGRAPH_PATT_COLORRAM+0x0c,
118 NV04_PGRAPH_PATT_COLORRAM+0x10,
119 NV04_PGRAPH_PATT_COLORRAM+0x14,
120 NV04_PGRAPH_PATT_COLORRAM+0x18,
121 NV04_PGRAPH_PATT_COLORRAM+0x1c,
122 NV04_PGRAPH_PATT_COLORRAM+0x20,
123 NV04_PGRAPH_PATT_COLORRAM+0x24,
124 NV04_PGRAPH_PATT_COLORRAM+0x28,
125 NV04_PGRAPH_PATT_COLORRAM+0x2c,
126 NV04_PGRAPH_PATT_COLORRAM+0x30,
127 NV04_PGRAPH_PATT_COLORRAM+0x34,
128 NV04_PGRAPH_PATT_COLORRAM+0x38,
129 NV04_PGRAPH_PATT_COLORRAM+0x3c,
130 NV04_PGRAPH_PATT_COLORRAM+0x40,
131 NV04_PGRAPH_PATT_COLORRAM+0x44,
132 NV04_PGRAPH_PATT_COLORRAM+0x48,
133 NV04_PGRAPH_PATT_COLORRAM+0x4c,
134 NV04_PGRAPH_PATT_COLORRAM+0x50,
135 NV04_PGRAPH_PATT_COLORRAM+0x54,
136 NV04_PGRAPH_PATT_COLORRAM+0x58,
137 NV04_PGRAPH_PATT_COLORRAM+0x5c,
138 NV04_PGRAPH_PATT_COLORRAM+0x60,
139 NV04_PGRAPH_PATT_COLORRAM+0x64,
140 NV04_PGRAPH_PATT_COLORRAM+0x68,
141 NV04_PGRAPH_PATT_COLORRAM+0x6c,
142 NV04_PGRAPH_PATT_COLORRAM+0x70,
143 NV04_PGRAPH_PATT_COLORRAM+0x74,
144 NV04_PGRAPH_PATT_COLORRAM+0x78,
145 NV04_PGRAPH_PATT_COLORRAM+0x7c,
146 NV04_PGRAPH_PATT_COLORRAM+0x80,
147 NV04_PGRAPH_PATT_COLORRAM+0x84,
148 NV04_PGRAPH_PATT_COLORRAM+0x88,
149 NV04_PGRAPH_PATT_COLORRAM+0x8c,
150 NV04_PGRAPH_PATT_COLORRAM+0x90,
151 NV04_PGRAPH_PATT_COLORRAM+0x94,
152 NV04_PGRAPH_PATT_COLORRAM+0x98,
153 NV04_PGRAPH_PATT_COLORRAM+0x9c,
154 NV04_PGRAPH_PATT_COLORRAM+0xa0,
155 NV04_PGRAPH_PATT_COLORRAM+0xa4,
156 NV04_PGRAPH_PATT_COLORRAM+0xa8,
157 NV04_PGRAPH_PATT_COLORRAM+0xac,
158 NV04_PGRAPH_PATT_COLORRAM+0xb0,
159 NV04_PGRAPH_PATT_COLORRAM+0xb4,
160 NV04_PGRAPH_PATT_COLORRAM+0xb8,
161 NV04_PGRAPH_PATT_COLORRAM+0xbc,
162 NV04_PGRAPH_PATT_COLORRAM+0xc0,
163 NV04_PGRAPH_PATT_COLORRAM+0xc4,
164 NV04_PGRAPH_PATT_COLORRAM+0xc8,
165 NV04_PGRAPH_PATT_COLORRAM+0xcc,
166 NV04_PGRAPH_PATT_COLORRAM+0xd0,
167 NV04_PGRAPH_PATT_COLORRAM+0xd4,
168 NV04_PGRAPH_PATT_COLORRAM+0xd8,
169 NV04_PGRAPH_PATT_COLORRAM+0xdc,
170 NV04_PGRAPH_PATT_COLORRAM+0xe0,
171 NV04_PGRAPH_PATT_COLORRAM+0xe4,
172 NV04_PGRAPH_PATT_COLORRAM+0xe8,
173 NV04_PGRAPH_PATT_COLORRAM+0xec,
174 NV04_PGRAPH_PATT_COLORRAM+0xf0,
175 NV04_PGRAPH_PATT_COLORRAM+0xf4,
176 NV04_PGRAPH_PATT_COLORRAM+0xf8,
177 NV04_PGRAPH_PATT_COLORRAM+0xfc,
178 NV04_PGRAPH_PATTERN,
179 0x0040080c,
180 NV04_PGRAPH_PATTERN_SHAPE,
181 0x00400600,
182 NV04_PGRAPH_ROP3,
183 NV04_PGRAPH_CHROMA,
184 NV04_PGRAPH_BETA_AND,
185 NV04_PGRAPH_BETA_PREMULT,
186 NV04_PGRAPH_CONTROL0,
187 NV04_PGRAPH_CONTROL1,
188 NV04_PGRAPH_CONTROL2,
189 NV04_PGRAPH_BLEND,
190 NV04_PGRAPH_STORED_FMT,
191 NV04_PGRAPH_SOURCE_COLOR,
192 0x00400560,
193 0x00400568,
194 0x00400564,
195 0x0040056c,
196 0x00400400,
197 0x00400480,
198 0x00400404,
199 0x00400484,
200 0x00400408,
201 0x00400488,
202 0x0040040c,
203 0x0040048c,
204 0x00400410,
205 0x00400490,
206 0x00400414,
207 0x00400494,
208 0x00400418,
209 0x00400498,
210 0x0040041c,
211 0x0040049c,
212 0x00400420,
213 0x004004a0,
214 0x00400424,
215 0x004004a4,
216 0x00400428,
217 0x004004a8,
218 0x0040042c,
219 0x004004ac,
220 0x00400430,
221 0x004004b0,
222 0x00400434,
223 0x004004b4,
224 0x00400438,
225 0x004004b8,
226 0x0040043c,
227 0x004004bc,
228 0x00400440,
229 0x004004c0,
230 0x00400444,
231 0x004004c4,
232 0x00400448,
233 0x004004c8,
234 0x0040044c,
235 0x004004cc,
236 0x00400450,
237 0x004004d0,
238 0x00400454,
239 0x004004d4,
240 0x00400458,
241 0x004004d8,
242 0x0040045c,
243 0x004004dc,
244 0x00400460,
245 0x004004e0,
246 0x00400464,
247 0x004004e4,
248 0x00400468,
249 0x004004e8,
250 0x0040046c,
251 0x004004ec,
252 0x00400470,
253 0x004004f0,
254 0x00400474,
255 0x004004f4,
256 0x00400478,
257 0x004004f8,
258 0x0040047c,
259 0x004004fc,
260 0x00400534,
261 0x00400538,
262 0x00400514,
263 0x00400518,
264 0x0040051c,
265 0x00400520,
266 0x00400524,
267 0x00400528,
268 0x0040052c,
269 0x00400530,
270 0x00400d00,
271 0x00400d40,
272 0x00400d80,
273 0x00400d04,
274 0x00400d44,
275 0x00400d84,
276 0x00400d08,
277 0x00400d48,
278 0x00400d88,
279 0x00400d0c,
280 0x00400d4c,
281 0x00400d8c,
282 0x00400d10,
283 0x00400d50,
284 0x00400d90,
285 0x00400d14,
286 0x00400d54,
287 0x00400d94,
288 0x00400d18,
289 0x00400d58,
290 0x00400d98,
291 0x00400d1c,
292 0x00400d5c,
293 0x00400d9c,
294 0x00400d20,
295 0x00400d60,
296 0x00400da0,
297 0x00400d24,
298 0x00400d64,
299 0x00400da4,
300 0x00400d28,
301 0x00400d68,
302 0x00400da8,
303 0x00400d2c,
304 0x00400d6c,
305 0x00400dac,
306 0x00400d30,
307 0x00400d70,
308 0x00400db0,
309 0x00400d34,
310 0x00400d74,
311 0x00400db4,
312 0x00400d38,
313 0x00400d78,
314 0x00400db8,
315 0x00400d3c,
316 0x00400d7c,
317 0x00400dbc,
318 0x00400590,
319 0x00400594,
320 0x00400598,
321 0x0040059c,
322 0x004005a8,
323 0x004005ac,
324 0x004005b0,
325 0x004005b4,
326 0x004005c0,
327 0x004005c4,
328 0x004005c8,
329 0x004005cc,
330 0x004005d0,
331 0x004005d4,
332 0x004005d8,
333 0x004005dc,
334 0x004005e0,
335 NV04_PGRAPH_PASSTHRU_0,
336 NV04_PGRAPH_PASSTHRU_1,
337 NV04_PGRAPH_PASSTHRU_2,
338 NV04_PGRAPH_DVD_COLORFMT,
339 NV04_PGRAPH_SCALED_FORMAT,
340 NV04_PGRAPH_MISC24_0,
341 NV04_PGRAPH_MISC24_1,
342 NV04_PGRAPH_MISC24_2,
343 0x00400500,
344 0x00400504,
345 NV04_PGRAPH_VALID1,
346 NV04_PGRAPH_VALID2,
347 NV04_PGRAPH_DEBUG_3
348};
349
350struct graph_state {
351 uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
352};
353
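/* Identify the channel whose context currently owns PGRAPH: CTX_USER
 * carries the active channel id in its top byte, and CTX_CONTROL
 * signals whether a valid context is loaded at all. */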
354static struct nouveau_channel *
355nv04_graph_channel(struct drm_device *dev)
356{
357 struct drm_nouveau_private *dev_priv = dev->dev_private;
358 int chid = 15;
359
360 if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
361 chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
362
363 if (chid > 15)
364 return NULL;
365
366 return dev_priv->channels.ptr[chid];
367}
368
369static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
370{
371 int i;
372
373 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
374 if (nv04_graph_ctx_regs[i] == reg)
375 return &ctx->nv04[i];
376 }
377
378 return NULL;
379}
380
381static int
382nv04_graph_load_context(struct nouveau_channel *chan)
383{
384 struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
385 struct drm_device *dev = chan->dev;
386 uint32_t tmp;
387 int i;
388
389 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
390 nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
391
392 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
393
394 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
395 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
396
397 tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
398 nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
399
400 return 0;
401}
402
403static int
404nv04_graph_unload_context(struct drm_device *dev)
405{
406 struct nouveau_channel *chan = NULL;
407 struct graph_state *ctx;
408 uint32_t tmp;
409 int i;
410
411 chan = nv04_graph_channel(dev);
412 if (!chan)
413 return 0;
414 ctx = chan->engctx[NVOBJ_ENGINE_GR];
415
416 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
417 ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
418
419 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
420 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
421 tmp |= 15 << 24;
422 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
423 return 0;
424}
425
426static int
427nv04_graph_context_new(struct nouveau_channel *chan, int engine)
428{
429 struct graph_state *pgraph_ctx;
430	NV_DEBUG(chan->dev, "nv04_graph_context_new %d\n", chan->id);
431
432 pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
433 if (pgraph_ctx == NULL)
434 return -ENOMEM;
435
436 *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
437
438 chan->engctx[engine] = pgraph_ctx;
439 return 0;
440}
441
442static void
443nv04_graph_context_del(struct nouveau_channel *chan, int engine)
444{
445 struct drm_device *dev = chan->dev;
446 struct drm_nouveau_private *dev_priv = dev->dev_private;
447 struct graph_state *pgraph_ctx = chan->engctx[engine];
448 unsigned long flags;
449
450 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
451 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
452
453 /* Unload the context if it's the currently active one */
454 if (nv04_graph_channel(dev) == chan)
455 nv04_graph_unload_context(dev);
456
457 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
458 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
459
460 /* Free the context resources */
461 kfree(pgraph_ctx);
462 chan->engctx[engine] = NULL;
463}
464
465int
466nv04_graph_object_new(struct nouveau_channel *chan, int engine,
467 u32 handle, u16 class)
468{
469 struct drm_device *dev = chan->dev;
470 struct nouveau_gpuobj *obj = NULL;
471 int ret;
472
473 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
474 if (ret)
475 return ret;
476 obj->engine = 1;
477 obj->class = class;
478
479#ifdef __BIG_ENDIAN
480 nv_wo32(obj, 0x00, 0x00080000 | class);
481#else
482 nv_wo32(obj, 0x00, class);
483#endif
484 nv_wo32(obj, 0x04, 0x00000000);
485 nv_wo32(obj, 0x08, 0x00000000);
486 nv_wo32(obj, 0x0c, 0x00000000);
487
488 ret = nouveau_ramht_insert(chan, handle, obj);
489 nouveau_gpuobj_ref(NULL, &obj);
490 return ret;
491}
492
493static int
494nv04_graph_init(struct drm_device *dev, int engine)
495{
496 uint32_t tmp;
497
498 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
499 ~NV_PMC_ENABLE_PGRAPH);
500 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
501 NV_PMC_ENABLE_PGRAPH);
502
503 /* Enable PGRAPH interrupts */
504 nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
505 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
506
507 nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
508 nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
509	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
510	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
511	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
512	/* 1231C000 blob, 001 haiku */
513	/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
514	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
515	/* 0x72111100 blob, 01 haiku */
516	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
517	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
518	/* haiku same */
519
520	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
521	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
522	/* haiku and blob 10d4 */
523
524	nv_wr32(dev, NV04_PGRAPH_STATE, 0xFFFFFFFF);
525	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000100);
526 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
527 tmp |= 15 << 24;
528 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
529
530 /* These don't belong here, they're part of a per-channel context */
531 nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
532	nv_wr32(dev, NV04_PGRAPH_BETA_AND, 0xFFFFFFFF);
533
534 return 0;
535}
536
537static int
538nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
539{
540 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
541 if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
542 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
543 return -EBUSY;
544 }
545 nv04_graph_unload_context(dev);
546 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
547 return 0;
548}
549
550/*
551 * Software methods, why they are needed, and how they all work:
552 *
553 * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
554 * 2d engine settings are kept inside the grobjs themselves. The grobjs are
555 * 3 words long on both chips. The grobj format on NV04 is:
556 *
557 * word 0:
558 * - bits 0-7: class
559 * - bit 12: color key active
560 * - bit 13: clip rect active
561 * - bit 14: if set, destination surface is swizzled and taken from buffer 5
562 * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
563 * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
564 * NV03_CONTEXT_SURFACE_DST].
565 * - bits 15-17: 2d operation [aka patch config]
566 * - bit 24: patch valid [enables rendering using this object]
567 * - bit 25: surf3d valid [for tex_tri and multitex_tri only]
568 * word 1:
569 * - bits 0-1: mono format
570 * - bits 8-13: color format
571 * - bits 16-31: DMA_NOTIFY instance
572 * word 2:
573 * - bits 0-15: DMA_A instance
574 * - bits 16-31: DMA_B instance
575 *
576 * On NV05 it's:
577 *
578 * word 0:
579 * - bits 0-7: class
580 * - bit 12: color key active
581 * - bit 13: clip rect active
582 * - bit 14: if set, destination surface is swizzled and taken from buffer 5
583 * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
584 * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
585 * NV03_CONTEXT_SURFACE_DST].
586 * - bits 15-17: 2d operation [aka patch config]
587 * - bits 20-22: dither mode
588 * - bit 24: patch valid [enables rendering using this object]
589 * - bit 25: surface_dst/surface_color/surf2d/surf3d valid
590 * - bit 26: surface_src/surface_zeta valid
591 * - bit 27: pattern valid
592 * - bit 28: rop valid
593 * - bit 29: beta1 valid
594 * - bit 30: beta4 valid
595 * word 1:
596 * - bits 0-1: mono format
597 * - bits 8-13: color format
598 * - bits 16-31: DMA_NOTIFY instance
599 * word 2:
600 * - bits 0-15: DMA_A instance
601 * - bits 16-31: DMA_B instance
602 *
603 * NV05 will set/unset the relevant valid bits when you poke the relevant
604 * object-binding methods with an object of the proper type, or with the
605 * NULL type. It'll only allow rendering using the grobj if all needed
606 * objects are bound. The needed set of objects depends on the selected
607 * operation: for example, the rop object is needed by ROP_AND, but not by SRCCOPY_AND.
608 *
609 * NV04 doesn't have these methods implemented at all, and doesn't have the
610 * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
611 * is set. So we have to emulate them in software, internally keeping the
612 * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04
613 * and the last word isn't actually used for anything, we abuse it for
614 * this purpose.
615 *
616 * Actually, NV05 can optionally check bit 24 too, but we disable this since
617 * there's no use for it.
618 *
619 * For unknown reasons, NV04 implements surf3d binding in hardware as an
620 * exception. Also for unknown reasons, NV04 doesn't implement the clipping
621 * methods on the surf3d object, so we have to emulate them too.
622 */
623
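/* For reference, a sketch (ours, not part of the original file) of the
 * valid bits the helpers below maintain in the grobj's spare word; the
 * values mirror the masks passed to nv04_graph_set_ctx_val() by the
 * bind methods, and the NV05 bit layout documented above.  The macro
 * names are invented for illustration only. */
#if 0
#define GROBJ_VALID_SURF_DST	0x02000000	/* bit 25: surf_dst/color/2d/3d */
#define GROBJ_VALID_SURF_SRC	0x04000000	/* bit 26: surf_src/zeta */
#define GROBJ_VALID_PATTERN	0x08000000	/* bit 27 */
#define GROBJ_VALID_ROP		0x10000000	/* bit 28 */
#define GROBJ_VALID_BETA1	0x20000000	/* bit 29 */
#define GROBJ_VALID_BETA4	0x40000000	/* bit 30 */
#endif
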
624static void
625nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
626{
627 struct drm_device *dev = chan->dev;
628 u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
629 int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
630 u32 tmp;
631
632 tmp = nv_ri32(dev, instance);
633 tmp &= ~mask;
634 tmp |= value;
635
636 nv_wi32(dev, instance, tmp);
637 nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
638 nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
639}
640
641static void
642nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
643{
644 struct drm_device *dev = chan->dev;
645 u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
646 u32 tmp, ctx1;
647 int class, op, valid = 1;
648
649 ctx1 = nv_ri32(dev, instance);
650 class = ctx1 & 0xff;
651 op = (ctx1 >> 15) & 7;
652 tmp = nv_ri32(dev, instance + 0xc);
653 tmp &= ~mask;
654 tmp |= value;
655 nv_wi32(dev, instance + 0xc, tmp);
656
657 /* check for valid surf2d/surf_dst/surf_color */
658 if (!(tmp & 0x02000000))
659 valid = 0;
660 /* check for valid surf_src/surf_zeta */
661 if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
662 valid = 0;
663
664 switch (op) {
665 /* SRCCOPY_AND, SRCCOPY: no extra objects required */
666 case 0:
667 case 3:
668 break;
669 /* ROP_AND: requires pattern and rop */
670 case 1:
671 if (!(tmp & 0x18000000))
672 valid = 0;
673 break;
674 /* BLEND_AND: requires beta1 */
675 case 2:
676 if (!(tmp & 0x20000000))
677 valid = 0;
678 break;
679 /* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
680 case 4:
681 case 5:
682 if (!(tmp & 0x40000000))
683 valid = 0;
684 break;
685 }
686
687 nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
688}
689
690static int
691nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
692 u32 class, u32 mthd, u32 data)
693{
694 if (data > 5)
695 return 1;
696	/* Old versions of the objects only accept the first three operations. */
697 if (data > 2 && class < 0x40)
698 return 1;
699 nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
700 /* changing operation changes set of objects needed for validation */
701 nv04_graph_set_ctx_val(chan, 0, 0);
702 return 0;
703}
704
705static int
706nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
707 u32 class, u32 mthd, u32 data)
708{
709 uint32_t min = data & 0xffff, max;
710 uint32_t w = data >> 16;
711 if (min & 0x8000)
712 /* too large */
713 return 1;
714 if (w & 0x8000)
715 /* yes, it accepts negative for some reason. */
716 w |= 0xffff0000;
717 max = min + w;
718 max &= 0x3ffff;
719 nv_wr32(chan->dev, 0x40053c, min);
720 nv_wr32(chan->dev, 0x400544, max);
721 return 0;
722}
723
724static int
725nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
726 u32 class, u32 mthd, u32 data)
727{
728 uint32_t min = data & 0xffff, max;
729 uint32_t w = data >> 16;
730 if (min & 0x8000)
731 /* too large */
732 return 1;
733 if (w & 0x8000)
734 /* yes, it accepts negative for some reason. */
735 w |= 0xffff0000;
736 max = min + w;
737 max &= 0x3ffff;
738 nv_wr32(chan->dev, 0x400540, min);
739 nv_wr32(chan->dev, 0x400548, max);
740 return 0;
741}
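
/* Worked example (ours) of the packing both clip handlers above share,
 * data = (extent << 16) | min: data 0x00f00010 gives min = 0x10 and
 * max = 0x10 + 0xf0 = 0x100, while data 0xffff0010 sign-extends the
 * extent to -1, giving max = 0xf. */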
742
743static int
744nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
745 u32 class, u32 mthd, u32 data)
746{
747 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
748 case 0x30:
749 nv04_graph_set_ctx1(chan, 0x00004000, 0);
750 nv04_graph_set_ctx_val(chan, 0x02000000, 0);
751 return 0;
752 case 0x42:
753 nv04_graph_set_ctx1(chan, 0x00004000, 0);
754 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
755 return 0;
756 }
757 return 1;
758}
759
760static int
761nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
762 u32 class, u32 mthd, u32 data)
763{
764 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
765 case 0x30:
766 nv04_graph_set_ctx1(chan, 0x00004000, 0);
767 nv04_graph_set_ctx_val(chan, 0x02000000, 0);
768 return 0;
769 case 0x42:
770 nv04_graph_set_ctx1(chan, 0x00004000, 0);
771 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
772 return 0;
773 case 0x52:
774 nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
775 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
776 return 0;
777 }
778 return 1;
779}
780
781static int
782nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
783 u32 class, u32 mthd, u32 data)
784{
785 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
786 case 0x30:
787 nv04_graph_set_ctx_val(chan, 0x08000000, 0);
788 return 0;
789 case 0x18:
790 nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
791 return 0;
792 }
793 return 1;
794}
795
796static int
797nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
798 u32 class, u32 mthd, u32 data)
799{
800 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
801 case 0x30:
802 nv04_graph_set_ctx_val(chan, 0x08000000, 0);
803 return 0;
804 case 0x44:
805 nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
806 return 0;
807 }
808 return 1;
809}
810
811static int
812nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
813 u32 class, u32 mthd, u32 data)
814{
815 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
816 case 0x30:
817 nv04_graph_set_ctx_val(chan, 0x10000000, 0);
818 return 0;
819 case 0x43:
820 nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
821 return 0;
822 }
823 return 1;
824}
825
826static int
827nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
828 u32 class, u32 mthd, u32 data)
829{
830 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
831 case 0x30:
832 nv04_graph_set_ctx_val(chan, 0x20000000, 0);
833 return 0;
834 case 0x12:
835 nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
836 return 0;
837 }
838 return 1;
839}
840
841static int
842nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
843 u32 class, u32 mthd, u32 data)
844{
845 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
846 case 0x30:
847 nv04_graph_set_ctx_val(chan, 0x40000000, 0);
848 return 0;
849 case 0x72:
850 nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
851 return 0;
852 }
853 return 1;
854}
855
856static int
857nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
858 u32 class, u32 mthd, u32 data)
859{
860 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
861 case 0x30:
862 nv04_graph_set_ctx_val(chan, 0x02000000, 0);
863 return 0;
864 case 0x58:
865 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
866 return 0;
867 }
868 return 1;
869}
870
871static int
872nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
873 u32 class, u32 mthd, u32 data)
874{
875 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
876 case 0x30:
877 nv04_graph_set_ctx_val(chan, 0x04000000, 0);
878 return 0;
879 case 0x59:
880 nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
881 return 0;
882 }
883 return 1;
884}
885
886static int
887nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
888 u32 class, u32 mthd, u32 data)
889{
890 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
891 case 0x30:
892 nv04_graph_set_ctx_val(chan, 0x02000000, 0);
893 return 0;
894 case 0x5a:
895 nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
896 return 0;
897 }
898 return 1;
899}
900
901static int
902nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
903 u32 class, u32 mthd, u32 data)
904{
905 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
906 case 0x30:
907 nv04_graph_set_ctx_val(chan, 0x04000000, 0);
908 return 0;
909 case 0x5b:
910 nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
911 return 0;
912 }
913 return 1;
914}
915
916static int
917nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
918 u32 class, u32 mthd, u32 data)
919{
920 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
921 case 0x30:
922 nv04_graph_set_ctx1(chan, 0x2000, 0);
923 return 0;
924 case 0x19:
925 nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
926 return 0;
927 }
928 return 1;
929}
930
931static int
932nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
933 u32 class, u32 mthd, u32 data)
934{
935 switch (nv_ri32(chan->dev, data << 4) & 0xff) {
936 case 0x30:
937 nv04_graph_set_ctx1(chan, 0x1000, 0);
938 return 0;
939	/* Yes, for some reason even the old versions of the objects
940 * accept 0x57 and not 0x17. Consistency be damned.
941 */
942 case 0x57:
943 nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
944 return 0;
945 }
946 return 1;
947}
948
949static struct nouveau_bitfield nv04_graph_intr[] = {
950 { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
951 {}
952};
953
954static struct nouveau_bitfield nv04_graph_nstatus[] = {
955 { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
956 { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
957 { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
958 { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
959 {}
960};
961
962struct nouveau_bitfield nv04_graph_nsource[] = {
963 { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
964 { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
965 { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
966 { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
967 { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
968 { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
969 { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
970 { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
971 { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
972 { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
973 { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
974 { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
975 { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
976 { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
977 { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
978 { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
979 { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
980 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
981 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
982 {}
983};
984
985static void
986nv04_graph_context_switch(struct drm_device *dev)
987{
988 struct drm_nouveau_private *dev_priv = dev->dev_private;
989 struct nouveau_channel *chan = NULL;
990 int chid;
991
992 nouveau_wait_for_idle(dev);
993
994 /* If previous context is valid, we need to save it */
995 nv04_graph_unload_context(dev);
996
997 /* Load context for next channel */
998 chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
999 NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
1000 chan = dev_priv->channels.ptr[chid];
1001 if (chan)
1002 nv04_graph_load_context(chan);
1003}
1004
1005static void
1006nv04_graph_isr(struct drm_device *dev)
1007{
1008 u32 stat;
1009
1010 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
1011 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
1012 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
1013 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
1014 u32 chid = (addr & 0x0f000000) >> 24;
1015 u32 subc = (addr & 0x0000e000) >> 13;
1016 u32 mthd = (addr & 0x00001ffc);
1017 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
1018 u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
1019 u32 show = stat;
1020
1021 if (stat & NV_PGRAPH_INTR_NOTIFY) {
1022 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
1023 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
1024 show &= ~NV_PGRAPH_INTR_NOTIFY;
1025 }
1026 }
1027
1028 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1029 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1030 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1031 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1032 nv04_graph_context_switch(dev);
1033 }
1034
1035 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
1036 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
1037
1038 if (show && nouveau_ratelimit()) {
1039 NV_INFO(dev, "PGRAPH -");
1040 nouveau_bitfield_print(nv04_graph_intr, show);
1041 printk(" nsource:");
1042 nouveau_bitfield_print(nv04_graph_nsource, nsource);
1043 printk(" nstatus:");
1044 nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
1045 printk("\n");
1046 NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
1047 "mthd 0x%04x data 0x%08x\n",
1048 chid, subc, class, mthd, data);
1049 }
1050 }
1051}
1052
1053static void
1054nv04_graph_destroy(struct drm_device *dev, int engine)
1055{
1056 struct nv04_graph_engine *pgraph = nv_engine(dev, engine);
1057
1058 nouveau_irq_unregister(dev, 12);
1059
1060 NVOBJ_ENGINE_DEL(dev, GR);
1061 kfree(pgraph);
1062}
1063
1064int
1065nv04_graph_create(struct drm_device *dev)
1066{
1067 struct nv04_graph_engine *pgraph;
1068
1069 pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
1070 if (!pgraph)
1071 return -ENOMEM;
1072
1073 pgraph->base.destroy = nv04_graph_destroy;
1074 pgraph->base.init = nv04_graph_init;
1075 pgraph->base.fini = nv04_graph_fini;
1076 pgraph->base.context_new = nv04_graph_context_new;
1077 pgraph->base.context_del = nv04_graph_context_del;
1078 pgraph->base.object_new = nv04_graph_object_new;
1079
1080 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
1081 nouveau_irq_register(dev, 12, nv04_graph_isr);
1082
1083 /* dvd subpicture */
1084 NVOBJ_CLASS(dev, 0x0038, GR);
1085
1086 /* m2mf */
1087 NVOBJ_CLASS(dev, 0x0039, GR);
1088
1089 /* nv03 gdirect */
1090 NVOBJ_CLASS(dev, 0x004b, GR);
1091 NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
1092 NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
1093 NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
1094 NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
1095 NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
1096
1097 /* nv04 gdirect */
1098 NVOBJ_CLASS(dev, 0x004a, GR);
1099 NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1100 NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
1101 NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
1102 NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
1103 NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
1104 NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
1105
1106 /* nv01 imageblit */
1107 NVOBJ_CLASS(dev, 0x001f, GR);
1108 NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
1109 NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
1110 NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
1111 NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
1112 NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
1113 NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
1114 NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
1115 NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
1116
1117 /* nv04 imageblit */
1118 NVOBJ_CLASS(dev, 0x005f, GR);
1119 NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
1120 NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
1121 NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
1122 NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
1123 NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
1124 NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
1125 NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
1126 NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
1127
1128 /* nv04 iifc */
1129 NVOBJ_CLASS(dev, 0x0060, GR);
1130 NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
1131 NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
1132 NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
1133 NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
1134 NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
1135 NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
1136 NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
1137 NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
1138
1139 /* nv05 iifc */
1140 NVOBJ_CLASS(dev, 0x0064, GR);
1141
1142 /* nv01 ifc */
1143 NVOBJ_CLASS(dev, 0x0021, GR);
1144 NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
1145 NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
1146 NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
1147 NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
1148 NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
1149 NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
1150 NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
1151
1152 /* nv04 ifc */
1153 NVOBJ_CLASS(dev, 0x0061, GR);
1154 NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
1155 NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
1156 NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
1157 NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
1158 NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
1159 NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
1160 NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
1161 NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
1162
1163 /* nv05 ifc */
1164 NVOBJ_CLASS(dev, 0x0065, GR);
1165
1166 /* nv03 sifc */
1167 NVOBJ_CLASS(dev, 0x0036, GR);
1168 NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
1169 NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1170 NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
1171 NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
1172 NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
1173 NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
1174
1175 /* nv04 sifc */
1176 NVOBJ_CLASS(dev, 0x0076, GR);
1177 NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
1178 NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1179 NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
1180 NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
1181 NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
1182 NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
1183 NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
1184
1185 /* nv05 sifc */
1186 NVOBJ_CLASS(dev, 0x0066, GR);
1187
1188 /* nv03 sifm */
1189 NVOBJ_CLASS(dev, 0x0037, GR);
1190 NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1191 NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
1192 NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
1193 NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
1194 NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
1195
1196 /* nv04 sifm */
1197 NVOBJ_CLASS(dev, 0x0077, GR);
1198 NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1199 NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
1200 NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
1201 NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
1202 NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
1203 NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
1204
1205 /* null */
1206 NVOBJ_CLASS(dev, 0x0030, GR);
1207
1208 /* surf2d */
1209 NVOBJ_CLASS(dev, 0x0042, GR);
1210
1211 /* rop */
1212 NVOBJ_CLASS(dev, 0x0043, GR);
1213
1214 /* beta1 */
1215 NVOBJ_CLASS(dev, 0x0012, GR);
1216
1217 /* beta4 */
1218 NVOBJ_CLASS(dev, 0x0072, GR);
1219
1220 /* cliprect */
1221 NVOBJ_CLASS(dev, 0x0019, GR);
1222
1223 /* nv01 pattern */
1224 NVOBJ_CLASS(dev, 0x0018, GR);
1225
1226 /* nv04 pattern */
1227 NVOBJ_CLASS(dev, 0x0044, GR);
1228
1229 /* swzsurf */
1230 NVOBJ_CLASS(dev, 0x0052, GR);
1231
1232 /* surf3d */
1233 NVOBJ_CLASS(dev, 0x0053, GR);
1234 NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
1235 NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
1236
1237 /* nv03 tex_tri */
1238 NVOBJ_CLASS(dev, 0x0048, GR);
1239 NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
1240 NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
1241 NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
1242
1243 /* tex_tri */
1244 NVOBJ_CLASS(dev, 0x0054, GR);
1245
1246 /* multitex_tri */
1247 NVOBJ_CLASS(dev, 0x0055, GR);
1248
1249 /* nv01 chroma */
1250 NVOBJ_CLASS(dev, 0x0017, GR);
1251
1252 /* nv04 chroma */
1253 NVOBJ_CLASS(dev, 0x0057, GR);
1254
1255 /* surf_dst */
1256 NVOBJ_CLASS(dev, 0x0058, GR);
1257
1258 /* surf_src */
1259 NVOBJ_CLASS(dev, 0x0059, GR);
1260
1261 /* surf_color */
1262 NVOBJ_CLASS(dev, 0x005a, GR);
1263
1264 /* surf_zeta */
1265 NVOBJ_CLASS(dev, 0x005b, GR);
1266
1267 /* nv01 line */
1268 NVOBJ_CLASS(dev, 0x001c, GR);
1269 NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
1270 NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1271 NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
1272 NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
1273 NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
1274 NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
1275
1276 /* nv04 line */
1277 NVOBJ_CLASS(dev, 0x005c, GR);
1278 NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
1279 NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1280 NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
1281 NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
1282 NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
1283 NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
1284 NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
1285
1286 /* nv01 tri */
1287 NVOBJ_CLASS(dev, 0x001d, GR);
1288 NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
1289 NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1290 NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
1291 NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
1292 NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
1293 NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
1294
1295 /* nv04 tri */
1296 NVOBJ_CLASS(dev, 0x005d, GR);
1297 NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
1298 NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1299 NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
1300 NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
1301 NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
1302 NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
1303 NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
1304
1305 /* nv01 rect */
1306 NVOBJ_CLASS(dev, 0x001e, GR);
1307 NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
1308 NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
1309 NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
1310 NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
1311 NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
1312 NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
1313
1314 /* nv04 rect */
1315 NVOBJ_CLASS(dev, 0x005e, GR);
1316 NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
1317 NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
1318 NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
1319 NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
1320 NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
1321 NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
1322 NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
1323
1324 return 0;
1325}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
deleted file mode 100644
index a9e380040fea..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ /dev/null
@@ -1,192 +0,0 @@
1#include <drm/drmP.h>
2
3#include "nouveau_drv.h"
4#include "nouveau_fifo.h"
5#include "nouveau_ramht.h"
6
7/* returns the size of fifo context */
8static int
9nouveau_fifo_ctx_size(struct drm_device *dev)
10{
11 struct drm_nouveau_private *dev_priv = dev->dev_private;
12
13 if (dev_priv->chipset >= 0x40)
14 return 128 * 32;
15 else
16 if (dev_priv->chipset >= 0x17)
17 return 64 * 32;
18 else
19 if (dev_priv->chipset >= 0x10)
20 return 32 * 32;
21
22 return 32 * 16;
23}
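
/* E.g. on NV40 and up this appears to work out to 32 channels * 128
 * bytes of RAMFC each = 4 KiB (interpretation ours). */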
24
25int nv04_instmem_init(struct drm_device *dev)
26{
27 struct drm_nouveau_private *dev_priv = dev->dev_private;
28 struct nouveau_gpuobj *ramht = NULL;
29 u32 offset, length;
30 int ret;
31
32 /* RAMIN always available */
33 dev_priv->ramin_available = true;
34
35 /* Reserve space at end of VRAM for PRAMIN */
36 if (dev_priv->card_type >= NV_40) {
37 u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
38 u32 rsvd;
39
40 /* estimate grctx size, the magics come from nv40_grctx.c */
41 if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
42 else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
43 else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
44 else rsvd = 0x4a40 * vs;
45 rsvd += 16 * 1024;
46 rsvd *= 32; /* per-channel */
47
48 rsvd += 512 * 1024; /* pci(e)gart table */
49 rsvd += 512 * 1024; /* object storage */
50
51 dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
52 } else {
53 dev_priv->ramin_rsvd_vram = 512 * 1024;
54 }
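
	/* Worked example (ours): a chipset 0x40 board with a single
	 * vertex shader unit reserves (0x6aa0 + 16KiB) * 32 bytes of
	 * per-channel grctx space plus 2 * 512KiB, i.e. 0x255400,
	 * rounded up to 0x256000. */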
55
56 /* Setup shared RAMHT */
57 ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
58 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
59 if (ret)
60 return ret;
61
62 ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
63 nouveau_gpuobj_ref(NULL, &ramht);
64 if (ret)
65 return ret;
66
67 /* And RAMRO */
68 ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
69 NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
70 if (ret)
71 return ret;
72
73 /* And RAMFC */
74 length = nouveau_fifo_ctx_size(dev);
75 switch (dev_priv->card_type) {
76 case NV_40:
77 offset = 0x20000;
78 break;
79 default:
80 offset = 0x11400;
81 break;
82 }
83
84 ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
85 NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
86 if (ret)
87 return ret;
88
89 /* Only allow space after RAMFC to be used for object allocation */
90 offset += length;
91
92 /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
93 * on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0
94 * ("new style" control) the upper 16-bits of 0x2220 points at this
95 * other mysterious table that's clobbering important things.
96 *
97 * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
98	 * smashed to pieces on us, so reserve 0x30000-0x40000 too.
99 */
100 if (dev_priv->card_type >= NV_40) {
101 if (offset < 0x40000)
102 offset = 0x40000;
103 }
104
105 ret = drm_mm_init(&dev_priv->ramin_heap, offset,
106 dev_priv->ramin_rsvd_vram - offset);
107 if (ret) {
108 NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret);
109 return ret;
110 }
111
112 return 0;
113}
114
115void
116nv04_instmem_takedown(struct drm_device *dev)
117{
118 struct drm_nouveau_private *dev_priv = dev->dev_private;
119
120 nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
121 nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
122 nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);
123
124 if (drm_mm_initialized(&dev_priv->ramin_heap))
125 drm_mm_takedown(&dev_priv->ramin_heap);
126}
127
128int
129nv04_instmem_suspend(struct drm_device *dev)
130{
131 return 0;
132}
133
134void
135nv04_instmem_resume(struct drm_device *dev)
136{
137}
138
139int
140nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
141 u32 size, u32 align)
142{
143 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
144 struct drm_mm_node *ramin = NULL;
145
146 do {
147 if (drm_mm_pre_get(&dev_priv->ramin_heap))
148 return -ENOMEM;
149
150 spin_lock(&dev_priv->ramin_lock);
151 ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
152 if (ramin == NULL) {
153 spin_unlock(&dev_priv->ramin_lock);
154 return -ENOMEM;
155 }
156
157 ramin = drm_mm_get_block_atomic(ramin, size, align);
158 spin_unlock(&dev_priv->ramin_lock);
159 } while (ramin == NULL);
160
161 gpuobj->node = ramin;
162 gpuobj->vinst = ramin->start;
163 return 0;
164}
165
166void
167nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
168{
169 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
170
171 spin_lock(&dev_priv->ramin_lock);
172 drm_mm_put_block(gpuobj->node);
173 gpuobj->node = NULL;
174 spin_unlock(&dev_priv->ramin_lock);
175}
176
177int
178nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
179{
180 gpuobj->pinst = gpuobj->vinst;
181 return 0;
182}
183
184void
185nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
186{
187}
188
189void
190nv04_instmem_flush(struct drm_device *dev)
191{
192}
diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c
deleted file mode 100644
index 83751e7a3309..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_mc.c
+++ /dev/null
@@ -1,23 +0,0 @@
1#include <drm/drmP.h>
2#include "nouveau_drv.h"
3#include <drm/nouveau_drm.h>
4
5int
6nv04_mc_init(struct drm_device *dev)
7{
8	/* Power up everything; resetting each individual unit will
9 * be done later if needed.
10 */
11
12 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
13
14 /* Disable PROM access. */
15 nv_wr32(dev, NV_PBUS_PCI_NV_20, NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
16
17 return 0;
18}
19
20void
21nv04_mc_takedown(struct drm_device *dev)
22{
23}
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index 435b5a832da3..2a0cc9d0614a 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -23,10 +23,15 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
 #include "nouveau_hw.h"
 #include "nouveau_pm.h"
 
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+
 int
 nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
@@ -46,7 +51,7 @@ nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 }
 
 struct nv04_pm_clock {
-	struct pll_lims pll;
+	struct nvbios_pll pll;
 	struct nouveau_pll_vals calc;
 };
 
@@ -58,13 +63,16 @@ struct nv04_pm_state {
 static int
 calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nouveau_clock *pclk = nouveau_clock(device);
 	int ret;
 
-	ret = get_pll_limits(dev, id, &clk->pll);
+	ret = nvbios_pll_parse(bios, id, &clk->pll);
 	if (ret)
 		return ret;
 
-	ret = nouveau_calc_pll_mnp(dev, &clk->pll, khz, &clk->calc);
+	ret = pclk->pll_calc(pclk, &clk->pll, khz, &clk->calc);
 	if (!ret)
 		return -EINVAL;
 
@@ -100,37 +108,38 @@ error:
 static void
 prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_clock *pclk = nouveau_clock(device);
 	u32 reg = clk->pll.reg;
 
 	/* thank the insane nouveau_hw_setpll() interface for this */
-	if (dev_priv->card_type >= NV_40)
+	if (device->card_type >= NV_40)
 		reg += 4;
 
-	nouveau_hw_setpll(dev, reg, &clk->calc);
+	pclk->pll_prog(pclk, reg, &clk->calc);
 }
 
 int
 nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_timer *ptimer = nouveau_timer(device);
 	struct nv04_pm_state *state = pre_state;
 
 	prog_pll(dev, &state->core);
 
 	if (state->memory.pll.reg) {
 		prog_pll(dev, &state->memory);
-		if (dev_priv->card_type < NV_30) {
-			if (dev_priv->card_type == NV_20)
-				nv_mask(dev, 0x1002c4, 0, 1 << 20);
+		if (device->card_type < NV_30) {
+			if (device->card_type == NV_20)
+				nv_mask(device, 0x1002c4, 0, 1 << 20);
 
 			/* Reset the DLLs */
-			nv_mask(dev, 0x1002c0, 0, 1 << 8);
+			nv_mask(device, 0x1002c0, 0, 1 << 8);
 		}
 	}
 
-	ptimer->init(dev);
+	nv_ofuncs(ptimer)->init(nv_object(ptimer));
 
 	kfree(state);
 	return 0;
diff --git a/drivers/gpu/drm/nouveau/nv04_software.c b/drivers/gpu/drm/nouveau/nv04_software.c
deleted file mode 100644
index 02509e715693..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_software.c
+++ /dev/null
@@ -1,147 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26
27#include "nouveau_drv.h"
28#include "nouveau_ramht.h"
29#include "nouveau_fence.h"
30#include "nouveau_software.h"
31#include "nouveau_hw.h"
32
33struct nv04_software_priv {
34 struct nouveau_software_priv base;
35};
36
37struct nv04_software_chan {
38 struct nouveau_software_chan base;
39};
40
41static int
42mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
43{
44
45 struct nouveau_page_flip_state state;
46
47 if (!nouveau_finish_page_flip(chan, &state)) {
48 nv_set_crtc_base(chan->dev, state.crtc, state.offset +
49 state.y * state.pitch +
50 state.x * state.bpp / 8);
51 }
52
53 return 0;
54}
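
/* Worked example (ours): flipping to (x, y) = (16, 10) on a 32bpp
 * scanout with a 4096-byte pitch moves the CRTC base to
 * offset + 10 * 4096 + 16 * 32 / 8 = offset + 41024. */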
55
56static int
57nv04_software_context_new(struct nouveau_channel *chan, int engine)
58{
59 struct nv04_software_chan *pch;
60
61 pch = kzalloc(sizeof(*pch), GFP_KERNEL);
62 if (!pch)
63 return -ENOMEM;
64
65 nouveau_software_context_new(&pch->base);
66 chan->engctx[engine] = pch;
67 return 0;
68}
69
70static void
71nv04_software_context_del(struct nouveau_channel *chan, int engine)
72{
73 struct nv04_software_chan *pch = chan->engctx[engine];
74 chan->engctx[engine] = NULL;
75 kfree(pch);
76}
77
78static int
79nv04_software_object_new(struct nouveau_channel *chan, int engine,
80 u32 handle, u16 class)
81{
82 struct drm_device *dev = chan->dev;
83 struct nouveau_gpuobj *obj = NULL;
84 int ret;
85
86 ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
87 if (ret)
88 return ret;
89 obj->engine = 0;
90 obj->class = class;
91
92 ret = nouveau_ramht_insert(chan, handle, obj);
93 nouveau_gpuobj_ref(NULL, &obj);
94 return ret;
95}
96
97static int
98nv04_software_init(struct drm_device *dev, int engine)
99{
100 return 0;
101}
102
103static int
104nv04_software_fini(struct drm_device *dev, int engine, bool suspend)
105{
106 return 0;
107}
108
109static void
110nv04_software_destroy(struct drm_device *dev, int engine)
111{
112 struct nv04_software_priv *psw = nv_engine(dev, engine);
113
114 NVOBJ_ENGINE_DEL(dev, SW);
115 kfree(psw);
116}
117
118int
119nv04_software_create(struct drm_device *dev)
120{
121 struct drm_nouveau_private *dev_priv = dev->dev_private;
122 struct nv04_software_priv *psw;
123
124 psw = kzalloc(sizeof(*psw), GFP_KERNEL);
125 if (!psw)
126 return -ENOMEM;
127
128 psw->base.base.destroy = nv04_software_destroy;
129 psw->base.base.init = nv04_software_init;
130 psw->base.base.fini = nv04_software_fini;
131 psw->base.base.context_new = nv04_software_context_new;
132 psw->base.base.context_del = nv04_software_context_del;
133 psw->base.base.object_new = nv04_software_object_new;
134 nouveau_software_create(&psw->base);
135
136 NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
137 if (dev_priv->card_type <= NV_04) {
138 NVOBJ_CLASS(dev, 0x006e, SW);
139 NVOBJ_MTHD (dev, 0x006e, 0x0150, nv04_fence_mthd);
140 NVOBJ_MTHD (dev, 0x006e, 0x0500, mthd_flip);
141 } else {
142 NVOBJ_CLASS(dev, 0x016e, SW);
143 NVOBJ_MTHD (dev, 0x016e, 0x0500, mthd_flip);
144 }
145
146 return 0;
147}
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
deleted file mode 100644
index 71ad319affcb..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_timer.c
+++ /dev/null
@@ -1,83 +0,0 @@
1#include <drm/drmP.h>
2#include "nouveau_drv.h"
3#include <drm/nouveau_drm.h>
4#include "nouveau_hw.h"
5
6int
7nv04_timer_init(struct drm_device *dev)
8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 u32 m, n, d;
11
12 nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
13 nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);
14
15 /* aim for 31.25MHz, which gives us nanosecond timestamps */
16 d = 1000000 / 32;
17
18 /* determine base clock for timer source */
19 if (dev_priv->chipset < 0x40) {
20 n = nouveau_hw_get_clock(dev, PLL_CORE);
21 } else
22 if (dev_priv->chipset == 0x40) {
23 /*XXX: figure this out */
24 n = 0;
25 } else {
26 n = dev_priv->crystal;
27 m = 1;
28 while (n < (d * 2)) {
29 n += (n / m);
30 m++;
31 }
32
33 nv_wr32(dev, 0x009220, m - 1);
34 }
35
36 if (!n) {
37 NV_WARN(dev, "PTIMER: unknown input clock freq\n");
38 if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
39 !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
40 nv_wr32(dev, NV04_PTIMER_NUMERATOR, 1);
41 nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 1);
42 }
43 return 0;
44 }
45
46 /* reduce ratio to acceptable values */
47 while (((n % 5) == 0) && ((d % 5) == 0)) {
48 n /= 5;
49 d /= 5;
50 }
51
52 while (((n % 2) == 0) && ((d % 2) == 0)) {
53 n /= 2;
54 d /= 2;
55 }
56
57 while (n > 0xffff || d > 0xffff) {
58 n >>= 1;
59 d >>= 1;
60 }
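
	/* Worked example (ours, clocks in kHz): a 400 MHz core clock
	 * gives n = 400000 against d = 31250; stripping the common
	 * factors of 5 and 2 leaves n = 64, d = 5, the same 12.8:1
	 * ratio in register-sized values. */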
61
62 nv_wr32(dev, NV04_PTIMER_NUMERATOR, n);
63 nv_wr32(dev, NV04_PTIMER_DENOMINATOR, d);
64 return 0;
65}
66
67u64
68nv04_timer_read(struct drm_device *dev)
69{
70 u32 hi, lo;
71
72 do {
73 hi = nv_rd32(dev, NV04_PTIMER_TIME_1);
74 lo = nv_rd32(dev, NV04_PTIMER_TIME_0);
75 } while (hi != nv_rd32(dev, NV04_PTIMER_TIME_1));
76
77 return ((u64)hi << 32 | lo);
78}
79
80void
81nv04_timer_takedown(struct drm_device *dev)
82{
83}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 7157d403ed59..099fbeda6e2e 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -25,7 +25,8 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
@@ -34,6 +35,8 @@
 
 #include <drm/i2c/ch7006.h>
 
+#include <subdev/i2c.h>
+
 static struct i2c_board_info nv04_tv_encoder_info[] = {
 	{
 		I2C_BOARD_INFO("ch7006", 0x75),
@@ -49,8 +52,11 @@ static struct i2c_board_info nv04_tv_encoder_info[] = {
 
 int nv04_tv_identify(struct drm_device *dev, int i2c_index)
 {
-	return nouveau_i2c_identify(dev, "TV encoder", nv04_tv_encoder_info,
-				    NULL, i2c_index);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+
+	return i2c->identify(i2c, i2c_index, "TV encoder",
+			     nv04_tv_encoder_info, NULL);
 }
 
 
@@ -64,12 +70,12 @@ int nv04_tv_identify(struct drm_device *dev, int i2c_index)
 static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_mode_state *state = &dev_priv->mode_reg;
+	struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
 	uint8_t crtc1A;
 
-	NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
+	NV_INFO(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
 		mode, nv_encoder->dcb->index);
 
 	state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
@@ -94,8 +100,7 @@ static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
 
 static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv04_crtc_reg *state = &dev_priv->mode_reg.crtc_reg[head];
+	struct nv04_crtc_reg *state = &nv04_display(dev)->mode_reg.crtc_reg[head];
 
 	state->tv_setup = 0;
 
@@ -133,9 +138,8 @@ static void nv04_tv_mode_set(struct drm_encoder *encoder,
 				struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
-	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
+	struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
 
 	regp->tv_htotal = adjusted_mode->htotal;
 	regp->tv_vtotal = adjusted_mode->vtotal;
@@ -157,12 +161,13 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
 
 	helper->dpms(encoder, DRM_MODE_DPMS_ON);
 
-	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+	NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
 		drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
 		'@' + ffs(nv_encoder->dcb->or));
 }
@@ -181,15 +186,16 @@ static const struct drm_encoder_funcs nv04_tv_funcs = {
 };
 
 int
-nv04_tv_create(struct drm_connector *connector, struct dcb_entry *entry)
+nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
 {
 	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
 	struct drm_device *dev = connector->dev;
 	struct drm_encoder_helper_funcs *hfuncs;
 	struct drm_encoder_slave_funcs *sfuncs;
-	struct nouveau_i2c_chan *i2c =
-		nouveau_i2c_find(dev, entry->i2c_index);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+	struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index);
 	int type, ret;
 
 	/* Ensure that we can talk to this encoder */
221 227
222 /* Run the slave-specific initialization */ 228 /* Run the slave-specific initialization */
223 ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), 229 ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
224 &i2c->adapter, &nv04_tv_encoder_info[type]); 230 &port->adapter, &nv04_tv_encoder_info[type]);
225 if (ret < 0) 231 if (ret < 0)
226 goto fail_cleanup; 232 goto fail_cleanup;
227 233
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
deleted file mode 100644
index 510e90f34482..000000000000
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ /dev/null
@@ -1,103 +0,0 @@
1#include <drm/drmP.h>
2#include "nouveau_drv.h"
3#include <drm/nouveau_drm.h>
4
5void
6nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
7 uint32_t size, uint32_t pitch, uint32_t flags)
8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
11
12 tile->addr = 0x80000000 | addr;
13 tile->limit = max(1u, addr + size) - 1;
14 tile->pitch = pitch;
15}
16
17void
18nv10_fb_free_tile_region(struct drm_device *dev, int i)
19{
20 struct drm_nouveau_private *dev_priv = dev->dev_private;
21 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
22
23 tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
24}
25
26void
27nv10_fb_set_tile_region(struct drm_device *dev, int i)
28{
29 struct drm_nouveau_private *dev_priv = dev->dev_private;
30 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
31
32 nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
33 nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
34 nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
35}
36
37int
38nv1a_fb_vram_init(struct drm_device *dev)
39{
40 struct drm_nouveau_private *dev_priv = dev->dev_private;
41 struct pci_dev *bridge;
42 uint32_t mem, mib;
43
44 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
45 if (!bridge) {
46 NV_ERROR(dev, "no bridge device\n");
47 return 0;
48 }
49
50 if (dev_priv->chipset == 0x1a) {
51 pci_read_config_dword(bridge, 0x7c, &mem);
52 mib = ((mem >> 6) & 31) + 1;
53 } else {
54 pci_read_config_dword(bridge, 0x84, &mem);
55 mib = ((mem >> 4) & 127) + 1;
56 }
57
58 dev_priv->vram_size = mib * 1024 * 1024;
59 return 0;
60}
61
62int
63nv10_fb_vram_init(struct drm_device *dev)
64{
65 struct drm_nouveau_private *dev_priv = dev->dev_private;
66 u32 fifo_data = nv_rd32(dev, NV04_PFB_FIFO_DATA);
67 u32 cfg0 = nv_rd32(dev, 0x100200);
68
69 dev_priv->vram_size = fifo_data & NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
70
71 if (cfg0 & 0x00000001)
72 dev_priv->vram_type = NV_MEM_TYPE_DDR1;
73 else
74 dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
75
76 return 0;
77}
78
79int
80nv10_fb_init(struct drm_device *dev)
81{
82 struct drm_nouveau_private *dev_priv = dev->dev_private;
83 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
84 int i;
85
86 /* Turn all the tiling regions off. */
87 pfb->num_tiles = NV10_PFB_TILE__SIZE;
88 for (i = 0; i < pfb->num_tiles; i++)
89 pfb->set_tile_region(dev, i);
90
91 return 0;
92}
93
94void
95nv10_fb_takedown(struct drm_device *dev)
96{
97 struct drm_nouveau_private *dev_priv = dev->dev_private;
98 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
99 int i;
100
101 for (i = 0; i < pfb->num_tiles; i++)
102 pfb->free_tile_region(dev, i);
103}
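
A note on the sizing logic in the file deleted above: nv1a/nForce IGPs have no discrete VRAM and read the stolen-memory size out of the host bridge's PCI config space, while nv10 reads it directly from PFB. A standalone sketch of both decodes -- the shifts and masks come from the removed code, NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK is assumed to be 0xfff00000, and main() is purely illustrative:

	#include <stdint.h>
	#include <stdio.h>

	/* nForce (chipset 0x1a) reads bridge config dword 0x7c, nForce2-class
	 * parts read 0x84, as in the removed nv1a_fb_vram_init(). */
	static uint64_t nv1a_vram_bytes(int chipset, uint32_t cfg)
	{
		uint32_t mib;

		if (chipset == 0x1a)
			mib = ((cfg >> 6) & 31) + 1;	/* 5-bit field: 1..32 MiB */
		else
			mib = ((cfg >> 4) & 127) + 1;	/* 7-bit field: 1..128 MiB */

		return (uint64_t)mib << 20;
	}

	/* nv10 and later read the amount straight from NV04_PFB_FIFO_DATA;
	 * masking the MB field yields the size in bytes at MiB granularity. */
	static uint64_t nv10_vram_bytes(uint32_t fifo_data)
	{
		return fifo_data & 0xfff00000u;
	}

	int main(void)
	{
		printf("nv1a: %llu MiB\n",
		       (unsigned long long)(nv1a_vram_bytes(0x1a, 31 << 6) >> 20));
		printf("nv10: %llu MiB\n",
		       (unsigned long long)(nv10_vram_bytes(64u << 20) >> 20));
		return 0;
	}
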
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index d30f752464ef..ce752bf5cc4e 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -22,10 +22,11 @@
22 * Authors: Ben Skeggs <bskeggs@redhat.com> 22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */ 23 */
24 24
25#include <drm/drmP.h> 25#include <core/object.h>
26#include "nouveau_drv.h" 26#include <core/class.h>
27
28#include "nouveau_drm.h"
27#include "nouveau_dma.h" 29#include "nouveau_dma.h"
28#include "nouveau_ramht.h"
29#include "nouveau_fence.h" 30#include "nouveau_fence.h"
30 31
31struct nv10_fence_chan { 32struct nv10_fence_chan {
@@ -39,7 +40,7 @@ struct nv10_fence_priv {
39 u32 sequence; 40 u32 sequence;
40}; 41};
41 42
42static int 43int
43nv10_fence_emit(struct nouveau_fence *fence) 44nv10_fence_emit(struct nouveau_fence *fence)
44{ 45{
45 struct nouveau_channel *chan = fence->channel; 46 struct nouveau_channel *chan = fence->channel;
@@ -60,15 +61,15 @@ nv10_fence_sync(struct nouveau_fence *fence,
60 return -ENODEV; 61 return -ENODEV;
61} 62}
62 63
63static int 64int
64nv17_fence_sync(struct nouveau_fence *fence, 65nv17_fence_sync(struct nouveau_fence *fence,
65 struct nouveau_channel *prev, struct nouveau_channel *chan) 66 struct nouveau_channel *prev, struct nouveau_channel *chan)
66{ 67{
67 struct nv10_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE); 68 struct nv10_fence_priv *priv = chan->drm->fence;
68 u32 value; 69 u32 value;
69 int ret; 70 int ret;
70 71
71 if (!mutex_trylock(&prev->mutex)) 72 if (!mutex_trylock(&prev->cli->mutex))
72 return -EBUSY; 73 return -EBUSY;
73 74
74 spin_lock(&priv->lock); 75 spin_lock(&priv->lock);
@@ -95,34 +96,33 @@ nv17_fence_sync(struct nouveau_fence *fence,
95 FIRE_RING (chan); 96 FIRE_RING (chan);
96 } 97 }
97 98
98 mutex_unlock(&prev->mutex); 99 mutex_unlock(&prev->cli->mutex);
99 return 0; 100 return 0;
100} 101}
101 102
102static u32 103u32
103nv10_fence_read(struct nouveau_channel *chan) 104nv10_fence_read(struct nouveau_channel *chan)
104{ 105{
105 return nvchan_rd32(chan, 0x0048); 106 return nv_ro32(chan->object, 0x0048);
106} 107}
107 108
108static void 109void
109nv10_fence_context_del(struct nouveau_channel *chan, int engine) 110nv10_fence_context_del(struct nouveau_channel *chan)
110{ 111{
111 struct nv10_fence_chan *fctx = chan->engctx[engine]; 112 struct nv10_fence_chan *fctx = chan->fence;
112 nouveau_fence_context_del(&fctx->base); 113 nouveau_fence_context_del(&fctx->base);
113 chan->engctx[engine] = NULL; 114 chan->fence = NULL;
114 kfree(fctx); 115 kfree(fctx);
115} 116}
116 117
117static int 118static int
118nv10_fence_context_new(struct nouveau_channel *chan, int engine) 119nv10_fence_context_new(struct nouveau_channel *chan)
119{ 120{
120 struct nv10_fence_priv *priv = nv_engine(chan->dev, engine); 121 struct nv10_fence_priv *priv = chan->drm->fence;
121 struct nv10_fence_chan *fctx; 122 struct nv10_fence_chan *fctx;
122 struct nouveau_gpuobj *obj;
123 int ret = 0; 123 int ret = 0;
124 124
125 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); 125 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
126 if (!fctx) 126 if (!fctx)
127 return -ENOMEM; 127 return -ENOMEM;
128 128
@@ -130,69 +130,56 @@ nv10_fence_context_new(struct nouveau_channel *chan, int engine)
130 130
131 if (priv->bo) { 131 if (priv->bo) {
132 struct ttm_mem_reg *mem = &priv->bo->bo.mem; 132 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
133 133 struct nouveau_object *object;
134 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY, 134 u32 start = mem->start * PAGE_SIZE;
135 mem->start * PAGE_SIZE, mem->size, 135 u32 limit = start + mem->size - 1;
136 NV_MEM_ACCESS_RW, 136
137 NV_MEM_TARGET_VRAM, &obj); 137 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
138 if (!ret) { 138 NvSema, 0x0002,
139 ret = nouveau_ramht_insert(chan, NvSema, obj); 139 &(struct nv_dma_class) {
140 nouveau_gpuobj_ref(NULL, &obj); 140 .flags = NV_DMA_TARGET_VRAM |
141 } 141 NV_DMA_ACCESS_RDWR,
142 .start = start,
143 .limit = limit,
144 }, sizeof(struct nv_dma_class),
145 &object);
142 } 146 }
143 147
144 if (ret) 148 if (ret)
145 nv10_fence_context_del(chan, engine); 149 nv10_fence_context_del(chan);
146 return ret; 150 return ret;
147} 151}
148 152
149static int 153void
150nv10_fence_fini(struct drm_device *dev, int engine, bool suspend) 154nv10_fence_destroy(struct nouveau_drm *drm)
151{ 155{
152 return 0; 156 struct nv10_fence_priv *priv = drm->fence;
153} 157 nouveau_bo_unmap(priv->bo);
154
155static int
156nv10_fence_init(struct drm_device *dev, int engine)
157{
158 return 0;
159}
160
161static void
162nv10_fence_destroy(struct drm_device *dev, int engine)
163{
164 struct drm_nouveau_private *dev_priv = dev->dev_private;
165 struct nv10_fence_priv *priv = nv_engine(dev, engine);
166
167 nouveau_bo_ref(NULL, &priv->bo); 158 nouveau_bo_ref(NULL, &priv->bo);
168 dev_priv->eng[engine] = NULL; 159 drm->fence = NULL;
169 kfree(priv); 160 kfree(priv);
170} 161}
171 162
172int 163int
173nv10_fence_create(struct drm_device *dev) 164nv10_fence_create(struct nouveau_drm *drm)
174{ 165{
175 struct drm_nouveau_private *dev_priv = dev->dev_private;
176 struct nv10_fence_priv *priv; 166 struct nv10_fence_priv *priv;
177 int ret = 0; 167 int ret = 0;
178 168
179 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 169 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
180 if (!priv) 170 if (!priv)
181 return -ENOMEM; 171 return -ENOMEM;
182 172
183 priv->base.engine.destroy = nv10_fence_destroy; 173 priv->base.dtor = nv10_fence_destroy;
184 priv->base.engine.init = nv10_fence_init; 174 priv->base.context_new = nv10_fence_context_new;
185 priv->base.engine.fini = nv10_fence_fini; 175 priv->base.context_del = nv10_fence_context_del;
186 priv->base.engine.context_new = nv10_fence_context_new;
187 priv->base.engine.context_del = nv10_fence_context_del;
188 priv->base.emit = nv10_fence_emit; 176 priv->base.emit = nv10_fence_emit;
189 priv->base.read = nv10_fence_read; 177 priv->base.read = nv10_fence_read;
190 priv->base.sync = nv10_fence_sync; 178 priv->base.sync = nv10_fence_sync;
191 dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
192 spin_lock_init(&priv->lock); 179 spin_lock_init(&priv->lock);
193 180
194 if (dev_priv->chipset >= 0x17) { 181 if (nv_device(drm->device)->chipset >= 0x17) {
195 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, 182 ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
196 0, 0x0000, NULL, &priv->bo); 183 0, 0x0000, NULL, &priv->bo);
197 if (!ret) { 184 if (!ret) {
198 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); 185 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
@@ -209,6 +196,6 @@ nv10_fence_create(struct drm_device *dev)
209 } 196 }
210 197
211 if (ret) 198 if (ret)
212 nv10_fence_destroy(dev, NVOBJ_ENGINE_FENCE); 199 nv10_fence_destroy(drm);
213 return ret; 200 return ret;
214} 201}
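
One detail the emit/read pair kept above depends on: fence sequence numbers are free-running 32-bit counters, so "has this fence signalled?" must be a wraparound-safe comparison rather than a plain >=. A minimal sketch (the helper name is made up; nouveau's real test lives in nouveau_fence.c):

	#include <stdbool.h>
	#include <stdint.h>

	/* True once the sequence read back from the channel (nv10_fence_read())
	 * has reached the value stamped by nv10_fence_emit(), treating the
	 * counter as modulo 2^32 via the signed-difference trick. */
	static bool fence_done(uint32_t read, uint32_t emitted)
	{
		return (int32_t)(read - emitted) >= 0;
	}

For example, fence_done(0x00000002, 0xfffffffe) is true even though the counter has wrapped in between.
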
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
deleted file mode 100644
index 05a2499b7a4d..000000000000
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ /dev/null
@@ -1,137 +0,0 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include "nouveau_drv.h"
29#include "nouveau_fifo.h"
30#include "nouveau_util.h"
31#include "nouveau_ramht.h"
32
33static struct ramfc_desc {
34 unsigned bits:6;
35 unsigned ctxs:5;
36 unsigned ctxp:8;
37 unsigned regs:5;
38 unsigned regp;
39} nv10_ramfc[] = {
40 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
41 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
42 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
43 { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
44 { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
45 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
46 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
47 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
48 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
49 {}
50};
51
52struct nv10_fifo_priv {
53 struct nouveau_fifo_priv base;
54 struct ramfc_desc *ramfc_desc;
55};
56
57struct nv10_fifo_chan {
58 struct nouveau_fifo_chan base;
59 struct nouveau_gpuobj *ramfc;
60};
61
62static int
63nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
64{
65 struct drm_device *dev = chan->dev;
66 struct drm_nouveau_private *dev_priv = dev->dev_private;
67 struct nv10_fifo_priv *priv = nv_engine(dev, engine);
68 struct nv10_fifo_chan *fctx;
69 unsigned long flags;
70 int ret;
71
72 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
73 if (!fctx)
74 return -ENOMEM;
75
76 /* map channel control registers */
77 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
78 NV03_USER(chan->id), PAGE_SIZE);
79 if (!chan->user) {
80 ret = -ENOMEM;
81 goto error;
82 }
83
84 /* initialise default fifo context */
85 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
86 chan->id * 32, ~0, 32,
87 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
88 if (ret)
89 goto error;
90
91 nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
92 nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
93 nv_wo32(fctx->ramfc, 0x08, 0x00000000);
94 nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
95 nv_wo32(fctx->ramfc, 0x10, 0x00000000);
96 nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
97 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
98#ifdef __BIG_ENDIAN
99 NV_PFIFO_CACHE1_BIG_ENDIAN |
100#endif
101 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
102 nv_wo32(fctx->ramfc, 0x18, 0x00000000);
103 nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
104
105 /* enable dma mode on the channel */
106 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
107 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
108 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
109
110error:
111 if (ret)
112 priv->base.base.context_del(chan, engine);
113 return ret;
114}
115
116int
117nv10_fifo_create(struct drm_device *dev)
118{
119 struct drm_nouveau_private *dev_priv = dev->dev_private;
120 struct nv10_fifo_priv *priv;
121
122 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
123 if (!priv)
124 return -ENOMEM;
125
126 priv->base.base.destroy = nv04_fifo_destroy;
127 priv->base.base.init = nv04_fifo_init;
128 priv->base.base.fini = nv04_fifo_fini;
129 priv->base.base.context_new = nv10_fifo_context_new;
130 priv->base.base.context_del = nv04_fifo_context_del;
131 priv->base.channels = 31;
132 priv->ramfc_desc = nv10_ramfc;
133 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
134
135 nouveau_irq_register(dev, 8, nv04_fifo_isr);
136 return 0;
137}
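
For reference, the ramfc_desc[] table deleted above is consumed by the shared nv04 helpers: each entry describes a bits-wide field at bit ctxs of the RAMFC word at byte offset ctxp, shadowing bit regs of PFIFO register regp, with the zeroed sentinel terminating the table. A simplified sketch of the load direction under that reading (the two callbacks stand in for nv_ro32()/nv_wr32(), and the real helper masks the register rather than overwriting it):

	#include <stdint.h>

	struct ramfc_desc {
		unsigned bits:6;	/* field width */
		unsigned ctxs:5;	/* bit offset within the RAMFC word */
		unsigned ctxp:8;	/* byte offset of the word in RAMFC */
		unsigned regs:5;	/* bit offset within the PFIFO register */
		unsigned regp;		/* PFIFO register address */
	};

	static void ramfc_load(const struct ramfc_desc *c,
			       uint32_t (*ramfc_rd32)(uint32_t offset),
			       void (*reg_wr32)(uint32_t reg, uint32_t val))
	{
		for (; c->bits; c++) {
			uint32_t mask = (c->bits == 32) ? 0xffffffffu
							: ((1u << c->bits) - 1);
			uint32_t data = (ramfc_rd32(c->ctxp) >> c->ctxs) & mask;

			reg_wr32(c->regp, data << c->regs);
		}
	}
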
diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
deleted file mode 100644
index ecc1b62dd751..000000000000
--- a/drivers/gpu/drm/nouveau/nv10_gpio.c
+++ /dev/null
@@ -1,123 +0,0 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include "nouveau_drv.h"
29#include "nouveau_hw.h"
30#include "nouveau_gpio.h"
31
32int
33nv10_gpio_sense(struct drm_device *dev, int line)
34{
35 if (line < 2) {
36 line = line * 16;
37 line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO) >> line;
38 return !!(line & 0x0100);
39 } else
40 if (line < 10) {
41 line = (line - 2) * 4;
42 line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT) >> line;
43 return !!(line & 0x04);
44 } else
45 if (line < 14) {
46 line = (line - 10) * 4;
47 line = NVReadCRTC(dev, 0, NV_PCRTC_850) >> line;
48 return !!(line & 0x04);
49 }
50
51 return -EINVAL;
52}
53
54int
55nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
56{
57 u32 reg, mask, data;
58
59 if (line < 2) {
60 line = line * 16;
61 reg = NV_PCRTC_GPIO;
62 mask = 0x00000011;
63 data = (dir << 4) | out;
64 } else
65 if (line < 10) {
66 line = (line - 2) * 4;
67 reg = NV_PCRTC_GPIO_EXT;
68 mask = 0x00000003;
69 data = (dir << 1) | out;
70 } else
71 if (line < 14) {
72 line = (line - 10) * 4;
73 reg = NV_PCRTC_850;
74 mask = 0x00000003;
75 data = (dir << 1) | out;
76 } else {
77 return -EINVAL;
78 }
79
80 mask = NVReadCRTC(dev, 0, reg) & ~(mask << line);
81 NVWriteCRTC(dev, 0, reg, mask | (data << line));
82 return 0;
83}
84
85void
86nv10_gpio_irq_enable(struct drm_device *dev, int line, bool on)
87{
88 u32 mask = 0x00010001 << line;
89
90 nv_wr32(dev, 0x001104, mask);
91 nv_mask(dev, 0x001144, mask, on ? mask : 0);
92}
93
94static void
95nv10_gpio_isr(struct drm_device *dev)
96{
97 u32 intr = nv_rd32(dev, 0x1104);
98 u32 hi = (intr & 0x0000ffff) >> 0;
99 u32 lo = (intr & 0xffff0000) >> 16;
100
101 nouveau_gpio_isr(dev, 0, hi | lo);
102
103 nv_wr32(dev, 0x001104, intr);
104}
105
106int
107nv10_gpio_init(struct drm_device *dev)
108{
109 nv_wr32(dev, 0x001140, 0x00000000);
110 nv_wr32(dev, 0x001100, 0xffffffff);
111 nv_wr32(dev, 0x001144, 0x00000000);
112 nv_wr32(dev, 0x001104, 0xffffffff);
113 nouveau_irq_register(dev, 28, nv10_gpio_isr); /* PBUS */
114 return 0;
115}
116
117void
118nv10_gpio_fini(struct drm_device *dev)
119{
120 nv_wr32(dev, 0x001140, 0x00000000);
121 nv_wr32(dev, 0x001144, 0x00000000);
122 nouveau_irq_unregister(dev, 28);
123}
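
The sense and drive routines removed above share one addressing scheme: lines 0-1 sit in NV_PCRTC_GPIO on a 16-bit stride, lines 2-9 in NV_PCRTC_GPIO_EXT on a 4-bit stride, and lines 10-13 in NV_PCRTC_850. A sketch of that mapping factored into a pure function (the enum stands in for the real register addresses):

	/* Resolve a GPIO line to (register, shift); returns -1 for lines >= 14. */
	enum { REG_GPIO, REG_GPIO_EXT, REG_850 };

	static int nv10_gpio_locate(int line, int *reg, int *shift)
	{
		if (line < 2) {
			*reg = REG_GPIO;
			*shift = line * 16;
		} else if (line < 10) {
			*reg = REG_GPIO_EXT;
			*shift = (line - 2) * 4;
		} else if (line < 14) {
			*reg = REG_850;
			*shift = (line - 10) * 4;
		} else {
			return -1;
		}
		return 0;
	}
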
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
deleted file mode 100644
index 75dd51bbe64d..000000000000
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ /dev/null
@@ -1,1188 +0,0 @@
1/*
2 * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <drm/drmP.h>
26#include <drm/nouveau_drm.h>
27#include "nouveau_drv.h"
28#include "nouveau_util.h"
29
30struct nv10_graph_engine {
31 struct nouveau_exec_engine base;
32};
33
34struct pipe_state {
35 uint32_t pipe_0x0000[0x040/4];
36 uint32_t pipe_0x0040[0x010/4];
37 uint32_t pipe_0x0200[0x0c0/4];
38 uint32_t pipe_0x4400[0x080/4];
39 uint32_t pipe_0x6400[0x3b0/4];
40 uint32_t pipe_0x6800[0x2f0/4];
41 uint32_t pipe_0x6c00[0x030/4];
42 uint32_t pipe_0x7000[0x130/4];
43 uint32_t pipe_0x7400[0x0c0/4];
44 uint32_t pipe_0x7800[0x0c0/4];
45};
46
47static int nv10_graph_ctx_regs[] = {
48 NV10_PGRAPH_CTX_SWITCH(0),
49 NV10_PGRAPH_CTX_SWITCH(1),
50 NV10_PGRAPH_CTX_SWITCH(2),
51 NV10_PGRAPH_CTX_SWITCH(3),
52 NV10_PGRAPH_CTX_SWITCH(4),
53 NV10_PGRAPH_CTX_CACHE(0, 0),
54 NV10_PGRAPH_CTX_CACHE(0, 1),
55 NV10_PGRAPH_CTX_CACHE(0, 2),
56 NV10_PGRAPH_CTX_CACHE(0, 3),
57 NV10_PGRAPH_CTX_CACHE(0, 4),
58 NV10_PGRAPH_CTX_CACHE(1, 0),
59 NV10_PGRAPH_CTX_CACHE(1, 1),
60 NV10_PGRAPH_CTX_CACHE(1, 2),
61 NV10_PGRAPH_CTX_CACHE(1, 3),
62 NV10_PGRAPH_CTX_CACHE(1, 4),
63 NV10_PGRAPH_CTX_CACHE(2, 0),
64 NV10_PGRAPH_CTX_CACHE(2, 1),
65 NV10_PGRAPH_CTX_CACHE(2, 2),
66 NV10_PGRAPH_CTX_CACHE(2, 3),
67 NV10_PGRAPH_CTX_CACHE(2, 4),
68 NV10_PGRAPH_CTX_CACHE(3, 0),
69 NV10_PGRAPH_CTX_CACHE(3, 1),
70 NV10_PGRAPH_CTX_CACHE(3, 2),
71 NV10_PGRAPH_CTX_CACHE(3, 3),
72 NV10_PGRAPH_CTX_CACHE(3, 4),
73 NV10_PGRAPH_CTX_CACHE(4, 0),
74 NV10_PGRAPH_CTX_CACHE(4, 1),
75 NV10_PGRAPH_CTX_CACHE(4, 2),
76 NV10_PGRAPH_CTX_CACHE(4, 3),
77 NV10_PGRAPH_CTX_CACHE(4, 4),
78 NV10_PGRAPH_CTX_CACHE(5, 0),
79 NV10_PGRAPH_CTX_CACHE(5, 1),
80 NV10_PGRAPH_CTX_CACHE(5, 2),
81 NV10_PGRAPH_CTX_CACHE(5, 3),
82 NV10_PGRAPH_CTX_CACHE(5, 4),
83 NV10_PGRAPH_CTX_CACHE(6, 0),
84 NV10_PGRAPH_CTX_CACHE(6, 1),
85 NV10_PGRAPH_CTX_CACHE(6, 2),
86 NV10_PGRAPH_CTX_CACHE(6, 3),
87 NV10_PGRAPH_CTX_CACHE(6, 4),
88 NV10_PGRAPH_CTX_CACHE(7, 0),
89 NV10_PGRAPH_CTX_CACHE(7, 1),
90 NV10_PGRAPH_CTX_CACHE(7, 2),
91 NV10_PGRAPH_CTX_CACHE(7, 3),
92 NV10_PGRAPH_CTX_CACHE(7, 4),
93 NV10_PGRAPH_CTX_USER,
94 NV04_PGRAPH_DMA_START_0,
95 NV04_PGRAPH_DMA_START_1,
96 NV04_PGRAPH_DMA_LENGTH,
97 NV04_PGRAPH_DMA_MISC,
98 NV10_PGRAPH_DMA_PITCH,
99 NV04_PGRAPH_BOFFSET0,
100 NV04_PGRAPH_BBASE0,
101 NV04_PGRAPH_BLIMIT0,
102 NV04_PGRAPH_BOFFSET1,
103 NV04_PGRAPH_BBASE1,
104 NV04_PGRAPH_BLIMIT1,
105 NV04_PGRAPH_BOFFSET2,
106 NV04_PGRAPH_BBASE2,
107 NV04_PGRAPH_BLIMIT2,
108 NV04_PGRAPH_BOFFSET3,
109 NV04_PGRAPH_BBASE3,
110 NV04_PGRAPH_BLIMIT3,
111 NV04_PGRAPH_BOFFSET4,
112 NV04_PGRAPH_BBASE4,
113 NV04_PGRAPH_BLIMIT4,
114 NV04_PGRAPH_BOFFSET5,
115 NV04_PGRAPH_BBASE5,
116 NV04_PGRAPH_BLIMIT5,
117 NV04_PGRAPH_BPITCH0,
118 NV04_PGRAPH_BPITCH1,
119 NV04_PGRAPH_BPITCH2,
120 NV04_PGRAPH_BPITCH3,
121 NV04_PGRAPH_BPITCH4,
122 NV10_PGRAPH_SURFACE,
123 NV10_PGRAPH_STATE,
124 NV04_PGRAPH_BSWIZZLE2,
125 NV04_PGRAPH_BSWIZZLE5,
126 NV04_PGRAPH_BPIXEL,
127 NV10_PGRAPH_NOTIFY,
128 NV04_PGRAPH_PATT_COLOR0,
129 NV04_PGRAPH_PATT_COLOR1,
130 NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
131 0x00400904,
132 0x00400908,
133 0x0040090c,
134 0x00400910,
135 0x00400914,
136 0x00400918,
137 0x0040091c,
138 0x00400920,
139 0x00400924,
140 0x00400928,
141 0x0040092c,
142 0x00400930,
143 0x00400934,
144 0x00400938,
145 0x0040093c,
146 0x00400940,
147 0x00400944,
148 0x00400948,
149 0x0040094c,
150 0x00400950,
151 0x00400954,
152 0x00400958,
153 0x0040095c,
154 0x00400960,
155 0x00400964,
156 0x00400968,
157 0x0040096c,
158 0x00400970,
159 0x00400974,
160 0x00400978,
161 0x0040097c,
162 0x00400980,
163 0x00400984,
164 0x00400988,
165 0x0040098c,
166 0x00400990,
167 0x00400994,
168 0x00400998,
169 0x0040099c,
170 0x004009a0,
171 0x004009a4,
172 0x004009a8,
173 0x004009ac,
174 0x004009b0,
175 0x004009b4,
176 0x004009b8,
177 0x004009bc,
178 0x004009c0,
179 0x004009c4,
180 0x004009c8,
181 0x004009cc,
182 0x004009d0,
183 0x004009d4,
184 0x004009d8,
185 0x004009dc,
186 0x004009e0,
187 0x004009e4,
188 0x004009e8,
189 0x004009ec,
190 0x004009f0,
191 0x004009f4,
192 0x004009f8,
193 0x004009fc,
194 NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */
195 0x0040080c,
196 NV04_PGRAPH_PATTERN_SHAPE,
197 NV03_PGRAPH_MONO_COLOR0,
198 NV04_PGRAPH_ROP3,
199 NV04_PGRAPH_CHROMA,
200 NV04_PGRAPH_BETA_AND,
201 NV04_PGRAPH_BETA_PREMULT,
202 0x00400e70,
203 0x00400e74,
204 0x00400e78,
205 0x00400e7c,
206 0x00400e80,
207 0x00400e84,
208 0x00400e88,
209 0x00400e8c,
210 0x00400ea0,
211 0x00400ea4,
212 0x00400ea8,
213 0x00400e90,
214 0x00400e94,
215 0x00400e98,
216 0x00400e9c,
217 NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
218 NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20-0x400f3c */
219 0x00400f04,
220 0x00400f24,
221 0x00400f08,
222 0x00400f28,
223 0x00400f0c,
224 0x00400f2c,
225 0x00400f10,
226 0x00400f30,
227 0x00400f14,
228 0x00400f34,
229 0x00400f18,
230 0x00400f38,
231 0x00400f1c,
232 0x00400f3c,
233 NV10_PGRAPH_XFMODE0,
234 NV10_PGRAPH_XFMODE1,
235 NV10_PGRAPH_GLOBALSTATE0,
236 NV10_PGRAPH_GLOBALSTATE1,
237 NV04_PGRAPH_STORED_FMT,
238 NV04_PGRAPH_SOURCE_COLOR,
239 NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */
240 NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */
241 0x00400404,
242 0x00400484,
243 0x00400408,
244 0x00400488,
245 0x0040040c,
246 0x0040048c,
247 0x00400410,
248 0x00400490,
249 0x00400414,
250 0x00400494,
251 0x00400418,
252 0x00400498,
253 0x0040041c,
254 0x0040049c,
255 0x00400420,
256 0x004004a0,
257 0x00400424,
258 0x004004a4,
259 0x00400428,
260 0x004004a8,
261 0x0040042c,
262 0x004004ac,
263 0x00400430,
264 0x004004b0,
265 0x00400434,
266 0x004004b4,
267 0x00400438,
268 0x004004b8,
269 0x0040043c,
270 0x004004bc,
271 0x00400440,
272 0x004004c0,
273 0x00400444,
274 0x004004c4,
275 0x00400448,
276 0x004004c8,
277 0x0040044c,
278 0x004004cc,
279 0x00400450,
280 0x004004d0,
281 0x00400454,
282 0x004004d4,
283 0x00400458,
284 0x004004d8,
285 0x0040045c,
286 0x004004dc,
287 0x00400460,
288 0x004004e0,
289 0x00400464,
290 0x004004e4,
291 0x00400468,
292 0x004004e8,
293 0x0040046c,
294 0x004004ec,
295 0x00400470,
296 0x004004f0,
297 0x00400474,
298 0x004004f4,
299 0x00400478,
300 0x004004f8,
301 0x0040047c,
302 0x004004fc,
303 NV03_PGRAPH_ABS_UCLIP_XMIN,
304 NV03_PGRAPH_ABS_UCLIP_XMAX,
305 NV03_PGRAPH_ABS_UCLIP_YMIN,
306 NV03_PGRAPH_ABS_UCLIP_YMAX,
307 0x00400550,
308 0x00400558,
309 0x00400554,
310 0x0040055c,
311 NV03_PGRAPH_ABS_UCLIPA_XMIN,
312 NV03_PGRAPH_ABS_UCLIPA_XMAX,
313 NV03_PGRAPH_ABS_UCLIPA_YMIN,
314 NV03_PGRAPH_ABS_UCLIPA_YMAX,
315 NV03_PGRAPH_ABS_ICLIP_XMAX,
316 NV03_PGRAPH_ABS_ICLIP_YMAX,
317 NV03_PGRAPH_XY_LOGIC_MISC0,
318 NV03_PGRAPH_XY_LOGIC_MISC1,
319 NV03_PGRAPH_XY_LOGIC_MISC2,
320 NV03_PGRAPH_XY_LOGIC_MISC3,
321 NV03_PGRAPH_CLIPX_0,
322 NV03_PGRAPH_CLIPX_1,
323 NV03_PGRAPH_CLIPY_0,
324 NV03_PGRAPH_CLIPY_1,
325 NV10_PGRAPH_COMBINER0_IN_ALPHA,
326 NV10_PGRAPH_COMBINER1_IN_ALPHA,
327 NV10_PGRAPH_COMBINER0_IN_RGB,
328 NV10_PGRAPH_COMBINER1_IN_RGB,
329 NV10_PGRAPH_COMBINER_COLOR0,
330 NV10_PGRAPH_COMBINER_COLOR1,
331 NV10_PGRAPH_COMBINER0_OUT_ALPHA,
332 NV10_PGRAPH_COMBINER1_OUT_ALPHA,
333 NV10_PGRAPH_COMBINER0_OUT_RGB,
334 NV10_PGRAPH_COMBINER1_OUT_RGB,
335 NV10_PGRAPH_COMBINER_FINAL0,
336 NV10_PGRAPH_COMBINER_FINAL1,
337 0x00400e00,
338 0x00400e04,
339 0x00400e08,
340 0x00400e0c,
341 0x00400e10,
342 0x00400e14,
343 0x00400e18,
344 0x00400e1c,
345 0x00400e20,
346 0x00400e24,
347 0x00400e28,
348 0x00400e2c,
349 0x00400e30,
350 0x00400e34,
351 0x00400e38,
352 0x00400e3c,
353 NV04_PGRAPH_PASSTHRU_0,
354 NV04_PGRAPH_PASSTHRU_1,
355 NV04_PGRAPH_PASSTHRU_2,
356 NV10_PGRAPH_DIMX_TEXTURE,
357 NV10_PGRAPH_WDIMX_TEXTURE,
358 NV10_PGRAPH_DVD_COLORFMT,
359 NV10_PGRAPH_SCALED_FORMAT,
360 NV04_PGRAPH_MISC24_0,
361 NV04_PGRAPH_MISC24_1,
362 NV04_PGRAPH_MISC24_2,
363 NV03_PGRAPH_X_MISC,
364 NV03_PGRAPH_Y_MISC,
365 NV04_PGRAPH_VALID1,
366 NV04_PGRAPH_VALID2,
367};
368
369static int nv17_graph_ctx_regs[] = {
370 NV10_PGRAPH_DEBUG_4,
371 0x004006b0,
372 0x00400eac,
373 0x00400eb0,
374 0x00400eb4,
375 0x00400eb8,
376 0x00400ebc,
377 0x00400ec0,
378 0x00400ec4,
379 0x00400ec8,
380 0x00400ecc,
381 0x00400ed0,
382 0x00400ed4,
383 0x00400ed8,
384 0x00400edc,
385 0x00400ee0,
386 0x00400a00,
387 0x00400a04,
388};
389
390struct graph_state {
391 int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
392 int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
393 struct pipe_state pipe_state;
394 uint32_t lma_window[4];
395};
396
397#define PIPE_SAVE(dev, state, addr) \
398 do { \
399 int __i; \
400 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
401 for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
402 state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
403 } while (0)
404
405#define PIPE_RESTORE(dev, state, addr) \
406 do { \
407 int __i; \
408 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
409 for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
410 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \
411 } while (0)
412
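/* A minimal sketch of the protocol the PIPE_SAVE/PIPE_RESTORE macros above
 * rely on, assuming NV10_PGRAPH_PIPE_ADDRESS/PIPE_DATA are 0x400f50/0x400f54:
 * the pair form an indexed port -- write the pipe offset to the address
 * register once, then stream words through the data register, which
 * auto-increments the internal pointer.  Callbacks stand in for nv_wr32()/
 * nv_rd32(): */
#include <stdint.h>

static void pipe_save_sketch(uint32_t addr, uint32_t *state, int words,
			     void (*wr32)(uint32_t reg, uint32_t val),
			     uint32_t (*rd32)(uint32_t reg))
{
	int i;

	wr32(0x400f50 /* NV10_PGRAPH_PIPE_ADDRESS */, addr);
	for (i = 0; i < words; i++)
		state[i] = rd32(0x400f54 /* NV10_PGRAPH_PIPE_DATA */);
}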
413static void nv10_graph_save_pipe(struct nouveau_channel *chan)
414{
415 struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
416 struct pipe_state *pipe = &pgraph_ctx->pipe_state;
417 struct drm_device *dev = chan->dev;
418
419 PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
420 PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
421 PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
422 PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
423 PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
424 PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
425 PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
426 PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
427 PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
428 PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
429}
430
431static void nv10_graph_load_pipe(struct nouveau_channel *chan)
432{
433 struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
434 struct pipe_state *pipe = &pgraph_ctx->pipe_state;
435 struct drm_device *dev = chan->dev;
436 uint32_t xfmode0, xfmode1;
437 int i;
438
439 nouveau_wait_for_idle(dev);
440 /* XXX check haiku comments */
441 xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
442 xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
443 nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
444 nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
445 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
446 for (i = 0; i < 4; i++)
447 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
448 for (i = 0; i < 4; i++)
449 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
450
451 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
452 for (i = 0; i < 3; i++)
453 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
454
455 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
456 for (i = 0; i < 3; i++)
457 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
458
459 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
460 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
461
462
463 PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
464 nouveau_wait_for_idle(dev);
465
466 /* restore XFMODE */
467 nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
468 nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
469 PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
470 PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
471 PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
472 PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
473 PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
474 PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
475 PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
476 PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
477 PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
478 nouveau_wait_for_idle(dev);
479}
480
481static void nv10_graph_create_pipe(struct nouveau_channel *chan)
482{
483 struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
484 struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
485 struct drm_device *dev = chan->dev;
486 uint32_t *fifo_pipe_state_addr;
487 int i;
488#define PIPE_INIT(addr) \
489 do { \
490 fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
491 } while (0)
492#define PIPE_INIT_END(addr) \
493 do { \
494 uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
495 ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
496 if (fifo_pipe_state_addr != __end_addr) \
497 NV_ERROR(dev, "incomplete pipe init for 0x%x : %p/%p\n", \
498 addr, fifo_pipe_state_addr, __end_addr); \
499 } while (0)
500#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
501
502 PIPE_INIT(0x0200);
503 for (i = 0; i < 48; i++)
504 NV_WRITE_PIPE_INIT(0x00000000);
505 PIPE_INIT_END(0x0200);
506
507 PIPE_INIT(0x6400);
508 for (i = 0; i < 211; i++)
509 NV_WRITE_PIPE_INIT(0x00000000);
510 NV_WRITE_PIPE_INIT(0x3f800000);
511 NV_WRITE_PIPE_INIT(0x40000000);
512 NV_WRITE_PIPE_INIT(0x40000000);
513 NV_WRITE_PIPE_INIT(0x40000000);
514 NV_WRITE_PIPE_INIT(0x40000000);
515 NV_WRITE_PIPE_INIT(0x00000000);
516 NV_WRITE_PIPE_INIT(0x00000000);
517 NV_WRITE_PIPE_INIT(0x3f800000);
518 NV_WRITE_PIPE_INIT(0x00000000);
519 NV_WRITE_PIPE_INIT(0x3f000000);
520 NV_WRITE_PIPE_INIT(0x3f000000);
521 NV_WRITE_PIPE_INIT(0x00000000);
522 NV_WRITE_PIPE_INIT(0x00000000);
523 NV_WRITE_PIPE_INIT(0x00000000);
524 NV_WRITE_PIPE_INIT(0x00000000);
525 NV_WRITE_PIPE_INIT(0x3f800000);
526 NV_WRITE_PIPE_INIT(0x00000000);
527 NV_WRITE_PIPE_INIT(0x00000000);
528 NV_WRITE_PIPE_INIT(0x00000000);
529 NV_WRITE_PIPE_INIT(0x00000000);
530 NV_WRITE_PIPE_INIT(0x00000000);
531 NV_WRITE_PIPE_INIT(0x3f800000);
532 NV_WRITE_PIPE_INIT(0x3f800000);
533 NV_WRITE_PIPE_INIT(0x3f800000);
534 NV_WRITE_PIPE_INIT(0x3f800000);
535 PIPE_INIT_END(0x6400);
536
537 PIPE_INIT(0x6800);
538 for (i = 0; i < 162; i++)
539 NV_WRITE_PIPE_INIT(0x00000000);
540 NV_WRITE_PIPE_INIT(0x3f800000);
541 for (i = 0; i < 25; i++)
542 NV_WRITE_PIPE_INIT(0x00000000);
543 PIPE_INIT_END(0x6800);
544
545 PIPE_INIT(0x6c00);
546 NV_WRITE_PIPE_INIT(0x00000000);
547 NV_WRITE_PIPE_INIT(0x00000000);
548 NV_WRITE_PIPE_INIT(0x00000000);
549 NV_WRITE_PIPE_INIT(0x00000000);
550 NV_WRITE_PIPE_INIT(0xbf800000);
551 NV_WRITE_PIPE_INIT(0x00000000);
552 NV_WRITE_PIPE_INIT(0x00000000);
553 NV_WRITE_PIPE_INIT(0x00000000);
554 NV_WRITE_PIPE_INIT(0x00000000);
555 NV_WRITE_PIPE_INIT(0x00000000);
556 NV_WRITE_PIPE_INIT(0x00000000);
557 NV_WRITE_PIPE_INIT(0x00000000);
558 PIPE_INIT_END(0x6c00);
559
560 PIPE_INIT(0x7000);
561 NV_WRITE_PIPE_INIT(0x00000000);
562 NV_WRITE_PIPE_INIT(0x00000000);
563 NV_WRITE_PIPE_INIT(0x00000000);
564 NV_WRITE_PIPE_INIT(0x00000000);
565 NV_WRITE_PIPE_INIT(0x00000000);
566 NV_WRITE_PIPE_INIT(0x00000000);
567 NV_WRITE_PIPE_INIT(0x00000000);
568 NV_WRITE_PIPE_INIT(0x00000000);
569 NV_WRITE_PIPE_INIT(0x00000000);
570 NV_WRITE_PIPE_INIT(0x00000000);
571 NV_WRITE_PIPE_INIT(0x00000000);
572 NV_WRITE_PIPE_INIT(0x00000000);
573 NV_WRITE_PIPE_INIT(0x7149f2ca);
574 NV_WRITE_PIPE_INIT(0x00000000);
575 NV_WRITE_PIPE_INIT(0x00000000);
576 NV_WRITE_PIPE_INIT(0x00000000);
577 NV_WRITE_PIPE_INIT(0x7149f2ca);
578 NV_WRITE_PIPE_INIT(0x00000000);
579 NV_WRITE_PIPE_INIT(0x00000000);
580 NV_WRITE_PIPE_INIT(0x00000000);
581 NV_WRITE_PIPE_INIT(0x7149f2ca);
582 NV_WRITE_PIPE_INIT(0x00000000);
583 NV_WRITE_PIPE_INIT(0x00000000);
584 NV_WRITE_PIPE_INIT(0x00000000);
585 NV_WRITE_PIPE_INIT(0x7149f2ca);
586 NV_WRITE_PIPE_INIT(0x00000000);
587 NV_WRITE_PIPE_INIT(0x00000000);
588 NV_WRITE_PIPE_INIT(0x00000000);
589 NV_WRITE_PIPE_INIT(0x7149f2ca);
590 NV_WRITE_PIPE_INIT(0x00000000);
591 NV_WRITE_PIPE_INIT(0x00000000);
592 NV_WRITE_PIPE_INIT(0x00000000);
593 NV_WRITE_PIPE_INIT(0x7149f2ca);
594 NV_WRITE_PIPE_INIT(0x00000000);
595 NV_WRITE_PIPE_INIT(0x00000000);
596 NV_WRITE_PIPE_INIT(0x00000000);
597 NV_WRITE_PIPE_INIT(0x7149f2ca);
598 NV_WRITE_PIPE_INIT(0x00000000);
599 NV_WRITE_PIPE_INIT(0x00000000);
600 NV_WRITE_PIPE_INIT(0x00000000);
601 NV_WRITE_PIPE_INIT(0x7149f2ca);
602 for (i = 0; i < 35; i++)
603 NV_WRITE_PIPE_INIT(0x00000000);
604 PIPE_INIT_END(0x7000);
605
606 PIPE_INIT(0x7400);
607 for (i = 0; i < 48; i++)
608 NV_WRITE_PIPE_INIT(0x00000000);
609 PIPE_INIT_END(0x7400);
610
611 PIPE_INIT(0x7800);
612 for (i = 0; i < 48; i++)
613 NV_WRITE_PIPE_INIT(0x00000000);
614 PIPE_INIT_END(0x7800);
615
616 PIPE_INIT(0x4400);
617 for (i = 0; i < 32; i++)
618 NV_WRITE_PIPE_INIT(0x00000000);
619 PIPE_INIT_END(0x4400);
620
621 PIPE_INIT(0x0000);
622 for (i = 0; i < 16; i++)
623 NV_WRITE_PIPE_INIT(0x00000000);
624 PIPE_INIT_END(0x0000);
625
626 PIPE_INIT(0x0040);
627 for (i = 0; i < 4; i++)
628 NV_WRITE_PIPE_INIT(0x00000000);
629 PIPE_INIT_END(0x0040);
630
631#undef PIPE_INIT
632#undef PIPE_INIT_END
633#undef NV_WRITE_PIPE_INIT
634}
635
636static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
637{
638 int i;
639 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
640 if (nv10_graph_ctx_regs[i] == reg)
641 return i;
642 }
643 NV_ERROR(dev, "unknown offset nv10_ctx_regs %d\n", reg);
644 return -1;
645}
646
647static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
648{
649 int i;
650 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
651 if (nv17_graph_ctx_regs[i] == reg)
652 return i;
653 }
654 NV_ERROR(dev, "unknown offset nv17_ctx_regs %d\n", reg);
655 return -1;
656}
657
658static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
659 uint32_t inst)
660{
661 struct drm_device *dev = chan->dev;
662 uint32_t st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
663 uint32_t ctx_user, ctx_switch[5];
664 int i, subchan = -1;
665
666 /* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
667 * that cannot be restored via MMIO. Do it through the FIFO
668 * instead.
669 */
670
671 /* Look for a celsius object */
672 for (i = 0; i < 8; i++) {
673 int class = nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
674
675 if (class == 0x56 || class == 0x96 || class == 0x99) {
676 subchan = i;
677 break;
678 }
679 }
680
681 if (subchan < 0 || !inst)
682 return;
683
684 /* Save the current ctx object */
685 ctx_user = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
686 for (i = 0; i < 5; i++)
687 ctx_switch[i] = nv_rd32(dev, NV10_PGRAPH_CTX_SWITCH(i));
688
689 /* Save the FIFO state */
690 st2 = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
691 st2_dl = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DL);
692 st2_dh = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DH);
693 fifo_ptr = nv_rd32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR);
694
695 for (i = 0; i < ARRAY_SIZE(fifo); i++)
696 fifo[i] = nv_rd32(dev, 0x4007a0 + 4 * i);
697
698 /* Switch to the celsius subchannel */
699 for (i = 0; i < 5; i++)
700 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i),
701 nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(subchan, i)));
702 nv_mask(dev, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
703
704 /* Inject NV10TCL_DMA_VTXBUF */
705 nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
706 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2,
707 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
708 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
709 nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
710 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
711 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
712
713 /* Restore the FIFO state */
714 for (i = 0; i < ARRAY_SIZE(fifo); i++)
715 nv_wr32(dev, 0x4007a0 + 4 * i, fifo[i]);
716
717 nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
718 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, st2);
719 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
720 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
721
722 /* Restore the current ctx object */
723 for (i = 0; i < 5; i++)
724 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
725 nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user);
726}
727
728static int
729nv10_graph_load_context(struct nouveau_channel *chan)
730{
731 struct drm_device *dev = chan->dev;
732 struct drm_nouveau_private *dev_priv = dev->dev_private;
733 struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
734 uint32_t tmp;
735 int i;
736
737 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
738 nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
739 if (dev_priv->chipset >= 0x17) {
740 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
741 nv_wr32(dev, nv17_graph_ctx_regs[i],
742 pgraph_ctx->nv17[i]);
743 }
744
745 nv10_graph_load_pipe(chan);
746 nv10_graph_load_dma_vtxbuf(chan, (nv_rd32(dev, NV10_PGRAPH_GLOBALSTATE1)
747 & 0xffff));
748
749 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
750 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
751 nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
752 tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
753 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
754 return 0;
755}
756
757static int
758nv10_graph_unload_context(struct drm_device *dev)
759{
760 struct drm_nouveau_private *dev_priv = dev->dev_private;
761 struct nouveau_channel *chan;
762 struct graph_state *ctx;
763 uint32_t tmp;
764 int i;
765
766 chan = nv10_graph_channel(dev);
767 if (!chan)
768 return 0;
769 ctx = chan->engctx[NVOBJ_ENGINE_GR];
770
771 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
772 ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);
773
774 if (dev_priv->chipset >= 0x17) {
775 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
776 ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
777 }
778
779 nv10_graph_save_pipe(chan);
780
781 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
782 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
783 tmp |= 31 << 24;
784 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
785 return 0;
786}
787
788static void
789nv10_graph_context_switch(struct drm_device *dev)
790{
791 struct drm_nouveau_private *dev_priv = dev->dev_private;
792 struct nouveau_channel *chan = NULL;
793 int chid;
794
795 nouveau_wait_for_idle(dev);
796
797 /* If previous context is valid, we need to save it */
798 nv10_graph_unload_context(dev);
799
800 /* Load context for next channel */
801 chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
802 chan = dev_priv->channels.ptr[chid];
803 if (chan && chan->engctx[NVOBJ_ENGINE_GR])
804 nv10_graph_load_context(chan);
805}
806
807#define NV_WRITE_CTX(reg, val) do { \
808 int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
809 if (offset > 0) \
810 pgraph_ctx->nv10[offset] = val; \
811 } while (0)
812
813#define NV17_WRITE_CTX(reg, val) do { \
814 int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
815 if (offset > 0) \
816 pgraph_ctx->nv17[offset] = val; \
817 } while (0)
818
819struct nouveau_channel *
820nv10_graph_channel(struct drm_device *dev)
821{
822 struct drm_nouveau_private *dev_priv = dev->dev_private;
823 int chid = 31;
824
825 if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
826 chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
827
828 if (chid >= 31)
829 return NULL;
830
831 return dev_priv->channels.ptr[chid];
832}
833
834static int
835nv10_graph_context_new(struct nouveau_channel *chan, int engine)
836{
837 struct drm_device *dev = chan->dev;
838 struct drm_nouveau_private *dev_priv = dev->dev_private;
839 struct graph_state *pgraph_ctx;
840
841 NV_DEBUG(dev, "nv10_graph_context_new %d\n", chan->id);
842
843 pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
844 if (pgraph_ctx == NULL)
845 return -ENOMEM;
846 chan->engctx[engine] = pgraph_ctx;
847
848 NV_WRITE_CTX(0x00400e88, 0x08000000);
849 NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
850 NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
851 NV_WRITE_CTX(0x00400e10, 0x00001000);
852 NV_WRITE_CTX(0x00400e14, 0x00001000);
853 NV_WRITE_CTX(0x00400e30, 0x00080008);
854 NV_WRITE_CTX(0x00400e34, 0x00080008);
855 if (dev_priv->chipset >= 0x17) {
856 /* is this really needed? */
857 NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
858 nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
859 NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
860 NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
861 NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
862 NV17_WRITE_CTX(0x00400ec0, 0x00000080);
863 NV17_WRITE_CTX(0x00400ed0, 0x00000080);
864 }
865 NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
866
867 nv10_graph_create_pipe(chan);
868 return 0;
869}
870
871static void
872nv10_graph_context_del(struct nouveau_channel *chan, int engine)
873{
874 struct drm_device *dev = chan->dev;
875 struct drm_nouveau_private *dev_priv = dev->dev_private;
876 struct graph_state *pgraph_ctx = chan->engctx[engine];
877 unsigned long flags;
878
879 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
880 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
881
882 /* Unload the context if it's the currently active one */
883 if (nv10_graph_channel(dev) == chan)
884 nv10_graph_unload_context(dev);
885
886 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
887 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
888
889 /* Free the context resources */
890 chan->engctx[engine] = NULL;
891 kfree(pgraph_ctx);
892}
893
894static void
895nv10_graph_set_tile_region(struct drm_device *dev, int i)
896{
897 struct drm_nouveau_private *dev_priv = dev->dev_private;
898 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
899
900 nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
901 nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
902 nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
903}
904
905static int
906nv10_graph_init(struct drm_device *dev, int engine)
907{
908 struct drm_nouveau_private *dev_priv = dev->dev_private;
909 u32 tmp;
910 int i;
911
912 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
913 ~NV_PMC_ENABLE_PGRAPH);
914 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
915 NV_PMC_ENABLE_PGRAPH);
916
917 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
918 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
919
920 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
921 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
922 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
923 /* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
924 nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
925 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
926 (1<<29) |
927 (1<<31));
928 if (dev_priv->chipset >= 0x17) {
929 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
930 nv_wr32(dev, 0x400a10, 0x3ff3fb6);
931 nv_wr32(dev, 0x400838, 0x2f8684);
932 nv_wr32(dev, 0x40083c, 0x115f3f);
933 nv_wr32(dev, 0x004006b0, 0x40000020);
934 } else
935 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
936
937 /* Turn all the tiling regions off. */
938 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
939 nv10_graph_set_tile_region(dev, i);
940
941 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
942 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
943 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
944 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
945 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
946 nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
947
948 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
949 tmp |= 31 << 24;
950 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
951 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
952 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
953
954 return 0;
955}
956
957static int
958nv10_graph_fini(struct drm_device *dev, int engine, bool suspend)
959{
960 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
961 if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
962 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
963 return -EBUSY;
964 }
965 nv10_graph_unload_context(dev);
966 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
967 return 0;
968}
969
970static int
971nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
972 u32 class, u32 mthd, u32 data)
973{
974 struct graph_state *ctx = chan->engctx[NVOBJ_ENGINE_GR];
975 struct drm_device *dev = chan->dev;
976 struct pipe_state *pipe = &ctx->pipe_state;
977 uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
978 uint32_t xfmode0, xfmode1;
979 int i;
980
981 ctx->lma_window[(mthd - 0x1638) / 4] = data;
982
983 if (mthd != 0x1644)
984 return 0;
985
986 nouveau_wait_for_idle(dev);
987
988 PIPE_SAVE(dev, pipe_0x0040, 0x0040);
989 PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
990
991 PIPE_RESTORE(dev, ctx->lma_window, 0x6790);
992
993 nouveau_wait_for_idle(dev);
994
995 xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
996 xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
997
998 PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
999 PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
1000 PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
1001 PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);
1002
1003 nouveau_wait_for_idle(dev);
1004
1005 nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
1006 nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
1007 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
1008 for (i = 0; i < 4; i++)
1009 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
1010 for (i = 0; i < 4; i++)
1011 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
1012
1013 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
1014 for (i = 0; i < 3; i++)
1015 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
1016
1017 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
1018 for (i = 0; i < 3; i++)
1019 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
1020
1021 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
1022 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
1023
1024 PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
1025
1026 nouveau_wait_for_idle(dev);
1027
1028 PIPE_RESTORE(dev, pipe_0x0040, 0x0040);
1029
1030 nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
1031 nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
1032
1033 PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
1034 PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
1035 PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
1036 PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
1037
1038 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
1039 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
1040
1041 nouveau_wait_for_idle(dev);
1042
1043 return 0;
1044}
1045
1046static int
1047nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
1048 u32 class, u32 mthd, u32 data)
1049{
1050 struct drm_device *dev = chan->dev;
1051
1052 nouveau_wait_for_idle(dev);
1053
1054 nv_wr32(dev, NV10_PGRAPH_DEBUG_4,
1055 nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8);
1056 nv_wr32(dev, 0x004006b0,
1057 nv_rd32(dev, 0x004006b0) | 0x8 << 24);
1058
1059 return 0;
1060}
1061
1062struct nouveau_bitfield nv10_graph_intr[] = {
1063 { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
1064 { NV_PGRAPH_INTR_ERROR, "ERROR" },
1065 {}
1066};
1067
1068struct nouveau_bitfield nv10_graph_nstatus[] = {
1069 { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
1070 { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
1071 { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
1072 { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
1073 {}
1074};
1075
1076static void
1077nv10_graph_isr(struct drm_device *dev)
1078{
1079 u32 stat;
1080
1081 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
1082 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
1083 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
1084 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
1085 u32 chid = (addr & 0x01f00000) >> 20;
1086 u32 subc = (addr & 0x00070000) >> 16;
1087 u32 mthd = (addr & 0x00001ffc);
1088 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
1089 u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
1090 u32 show = stat;
1091
1092 if (stat & NV_PGRAPH_INTR_ERROR) {
1093 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
1094 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
1095 show &= ~NV_PGRAPH_INTR_ERROR;
1096 }
1097 }
1098
1099 if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
1100 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
1101 stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1102 show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
1103 nv10_graph_context_switch(dev);
1104 }
1105
1106 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
1107 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
1108
1109 if (show && nouveau_ratelimit()) {
1110 NV_INFO(dev, "PGRAPH -");
1111 nouveau_bitfield_print(nv10_graph_intr, show);
1112 printk(" nsource:");
1113 nouveau_bitfield_print(nv04_graph_nsource, nsource);
1114 printk(" nstatus:");
1115 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
1116 printk("\n");
1117 NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
1118 "mthd 0x%04x data 0x%08x\n",
1119 chid, subc, class, mthd, data);
1120 }
1121 }
1122}
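/* Sketch of the NV04_PGRAPH_TRAPPED_ADDR unpacking performed by the ISR
 * above, with the bit ranges named: [24:20] channel id, [18:16] subchannel,
 * [12:2] method (methods are word-aligned, hence the 0x1ffc mask). */
#include <stdint.h>

struct nv04_trap {
	unsigned chid;
	unsigned subc;
	unsigned mthd;
};

static struct nv04_trap nv04_decode_trap(uint32_t addr)
{
	return (struct nv04_trap){
		.chid = (addr & 0x01f00000) >> 20,
		.subc = (addr & 0x00070000) >> 16,
		.mthd = (addr & 0x00001ffc),
	};
}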
1123
1124static void
1125nv10_graph_destroy(struct drm_device *dev, int engine)
1126{
1127 struct nv10_graph_engine *pgraph = nv_engine(dev, engine);
1128
1129 nouveau_irq_unregister(dev, 12);
1130 kfree(pgraph);
1131}
1132
1133int
1134nv10_graph_create(struct drm_device *dev)
1135{
1136 struct drm_nouveau_private *dev_priv = dev->dev_private;
1137 struct nv10_graph_engine *pgraph;
1138
1139 pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
1140 if (!pgraph)
1141 return -ENOMEM;
1142
1143 pgraph->base.destroy = nv10_graph_destroy;
1144 pgraph->base.init = nv10_graph_init;
1145 pgraph->base.fini = nv10_graph_fini;
1146 pgraph->base.context_new = nv10_graph_context_new;
1147 pgraph->base.context_del = nv10_graph_context_del;
1148 pgraph->base.object_new = nv04_graph_object_new;
1149 pgraph->base.set_tile_region = nv10_graph_set_tile_region;
1150
1151 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
1152 nouveau_irq_register(dev, 12, nv10_graph_isr);
1153
1154 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
1155 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
1156 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
1157 NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
1158 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
1159 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
1160 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
1161 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
1162 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
1163 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
1164 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
1165 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
1166 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
1167 NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
1168 NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
1169 NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
1170 NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
1171
1172 /* celsius */
1173 if (dev_priv->chipset <= 0x10) {
1174 NVOBJ_CLASS(dev, 0x0056, GR);
1175 } else
1176 if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
1177 NVOBJ_CLASS(dev, 0x0096, GR);
1178 } else {
1179 NVOBJ_CLASS(dev, 0x0099, GR);
1180 NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
1181 NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
1182 NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
1183 NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
1184 NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
1185 }
1186
1187 return 0;
1188}
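
The chipset checks closing nv10_graph_create() above encode which celsius 3D class each NV1x family exposes; the same logic factored into a pure function, as a sketch mirroring only the comparisons in the removed code:

	/* Celsius class selection, per the tail of nv10_graph_create(). */
	static int celsius_class(int chipset)
	{
		if (chipset <= 0x10)
			return 0x0056;			/* nv10 */
		if (chipset < 0x17 || chipset == 0x1a)
			return 0x0096;			/* nv11/nv15/nv1a */
		return 0x0099;				/* nv17+, adds the LMA methods */
	}
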
diff --git a/drivers/gpu/drm/nouveau/nv17_fifo.c b/drivers/gpu/drm/nouveau/nv17_fifo.c
deleted file mode 100644
index 4ae61aeea741..000000000000
--- a/drivers/gpu/drm/nouveau/nv17_fifo.c
+++ /dev/null
@@ -1,176 +0,0 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include "nouveau_drv.h"
29#include "nouveau_fifo.h"
30#include "nouveau_util.h"
31#include "nouveau_ramht.h"
32
33static struct ramfc_desc {
34 unsigned bits:6;
35 unsigned ctxs:5;
36 unsigned ctxp:8;
37 unsigned regs:5;
38 unsigned regp;
39} nv17_ramfc[] = {
40 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
41 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
42 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
43 { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
44 { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
45 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
46 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
47 { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
48 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
49 { 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
50 { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
51 { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
52 { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
53 { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
54 {}
55};
56
57struct nv17_fifo_priv {
58 struct nouveau_fifo_priv base;
59 struct ramfc_desc *ramfc_desc;
60};
61
62struct nv17_fifo_chan {
63 struct nouveau_fifo_chan base;
64 struct nouveau_gpuobj *ramfc;
65};
66
67static int
68nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
69{
70 struct drm_device *dev = chan->dev;
71 struct drm_nouveau_private *dev_priv = dev->dev_private;
72 struct nv17_fifo_priv *priv = nv_engine(dev, engine);
73 struct nv17_fifo_chan *fctx;
74 unsigned long flags;
75 int ret;
76
77 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
78 if (!fctx)
79 return -ENOMEM;
80
81 /* map channel control registers */
82 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
83 NV03_USER(chan->id), PAGE_SIZE);
84 if (!chan->user) {
85 ret = -ENOMEM;
86 goto error;
87 }
88
89 /* initialise default fifo context */
90 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
91 chan->id * 64, ~0, 64,
92 NVOBJ_FLAG_ZERO_ALLOC |
93 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
94 if (ret)
95 goto error;
96
97 nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
98 nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
99 nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
100 nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
101 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
102#ifdef __BIG_ENDIAN
103 NV_PFIFO_CACHE1_BIG_ENDIAN |
104#endif
105 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
106
107 /* enable dma mode on the channel */
108 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
109 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
110 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
111
112error:
113 if (ret)
114 priv->base.base.context_del(chan, engine);
115 return ret;
116}
117
118static int
119nv17_fifo_init(struct drm_device *dev, int engine)
120{
121 struct drm_nouveau_private *dev_priv = dev->dev_private;
122 struct nv17_fifo_priv *priv = nv_engine(dev, engine);
123 int i;
124
125 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
126 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
127
128 nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
129 nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
130
131 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
132 ((dev_priv->ramht->bits - 9) << 16) |
133 (dev_priv->ramht->gpuobj->pinst >> 8));
134 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
135 nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
136 dev_priv->ramfc->pinst >> 8);
137
138 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
139
140 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
141 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
142
143 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
144 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
145 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
146
147 for (i = 0; i < priv->base.channels; i++) {
148 if (dev_priv->channels.ptr[i])
149 nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
150 }
151
152 return 0;
153}
154
155int
156nv17_fifo_create(struct drm_device *dev)
157{
158 struct drm_nouveau_private *dev_priv = dev->dev_private;
159 struct nv17_fifo_priv *priv;
160
161 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
162 if (!priv)
163 return -ENOMEM;
164
165 priv->base.base.destroy = nv04_fifo_destroy;
166 priv->base.base.init = nv17_fifo_init;
167 priv->base.base.fini = nv04_fifo_fini;
168 priv->base.base.context_new = nv17_fifo_context_new;
169 priv->base.base.context_del = nv04_fifo_context_del;
170 priv->base.channels = 31;
171 priv->ramfc_desc = nv17_ramfc;
172 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
173
174 nouveau_irq_register(dev, 8, nv04_fifo_isr);
175 return 0;
176}
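The ramfc_desc[] table in the deleted file above gives, for each saved FIFO field, its width in bits (bits), its bit offset within the RAMFC dword (ctxs), the byte offset of that dword (ctxp), and the source bit offset (regs) in the PFIFO register regp. A minimal standalone sketch of the same table-driven copy follows; the MMIO read is replaced by a deterministic stub and the register addresses are made up, so this is a model of the design, not kernel code:

#include <stdint.h>
#include <stdio.h>

struct ramfc_desc {
	unsigned bits;	/* field width in bits */
	unsigned ctxs;	/* bit offset within the RAMFC dword */
	unsigned ctxp;	/* byte offset of the RAMFC dword */
	unsigned regs;	/* bit offset within the PFIFO register */
	unsigned regp;	/* PFIFO register address (stubbed below) */
};

/* Deterministic stand-in for an MMIO read; not real hardware state. */
static uint32_t fake_rd32(unsigned reg)
{
	return (uint32_t)reg * 2654435761u;
}

/* Table-driven context save in the style of the deleted nv17_fifo.c:
 * pull each register field out and pack it into the RAMFC image. */
static void save_context(const struct ramfc_desc *c, uint32_t *ramfc)
{
	for (; c->bits; c++) {
		uint32_t mask = c->bits == 32 ? 0xffffffffu
					      : (1u << c->bits) - 1;
		uint32_t val = (fake_rd32(c->regp) >> c->regs) & mask;

		ramfc[c->ctxp / 4] |= val << c->ctxs;
	}
}

int main(void)
{
	/* Hypothetical entries shaped like nv17_ramfc[]; the register
	 * addresses here are invented for the demo. */
	static const struct ramfc_desc desc[] = {
		{ 32,  0, 0x00, 0, 0x3210 },
		{ 16,  0, 0x0c, 0, 0x3230 },
		{ 16, 16, 0x0c, 0, 0x3234 },
		{ 0 }
	};
	uint32_t ramfc[16] = { 0 };

	save_context(desc, ramfc);
	printf("ramfc[0x00]=%08x ramfc[0x0c]=%08x\n",
	       (unsigned)ramfc[0], (unsigned)ramfc[3]);
	return 0;
}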
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 6331e79b0124..897b63621e2d 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -26,18 +26,32 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
-#include "nouveau_gpio.h"
 #include "nouveau_hw.h"
 #include "nv17_tv.h"
 
+#include <core/device.h>
+
+#include <subdev/bios/gpio.h>
+#include <subdev/gpio.h>
+
+MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
+		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
+		 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
+		 "\t\tDefault: PAL\n"
+		 "\t\t*NOTE* Ignored for cards with external TV encoders.");
+static char *nouveau_tv_norm;
+module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
+
 static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
 	uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
 		fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -46,15 +60,15 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 
 #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
 	testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
-	if (dev_priv->vbios.tvdactestval)
-		testval = dev_priv->vbios.tvdactestval;
+	if (drm->vbios.tvdactestval)
+		testval = drm->vbios.tvdactestval;
 
 	dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
 	head = (dacclk & 0x100) >> 8;
 
 	/* Save the previous state. */
-	gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
-	gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
+	gpio1 = gpio->get(gpio, 0, DCB_GPIO_TVDAC1, 0xff);
+	gpio0 = gpio->get(gpio, 0, DCB_GPIO_TVDAC0, 0xff);
 	fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
 	fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
 	fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -65,8 +79,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 	ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
 
 	/* Prepare the DAC for load detection. */
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, true);
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, true);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, true);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, true);
 
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -111,8 +125,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, gpio1);
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, gpio0);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, gpio1);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, gpio0);
 
 	return sample;
 }
@@ -120,15 +134,18 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 static bool
 get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_object *device = drm->device;
+
 	/* Zotac FX5200 */
-	if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) ||
-	    nv_match_device(dev, 0x0322, 0x19da, 0x2035)) {
+	if (nv_device_match(device, 0x0322, 0x19da, 0x1035) ||
+	    nv_device_match(device, 0x0322, 0x19da, 0x2035)) {
 		*pin_mask = 0xc;
 		return false;
 	}
 
 	/* MSI nForce2 IGP */
-	if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) {
+	if (nv_device_match(device, 0x01f0, 0x1462, 0x5710)) {
 		*pin_mask = 0xc;
 		return false;
 	}
@@ -140,18 +157,18 @@ static enum drm_connector_status
 nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_mode_config *conf = &dev->mode_config;
 	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
-	struct dcb_entry *dcb = tv_enc->base.dcb;
+	struct dcb_output *dcb = tv_enc->base.dcb;
 	bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask);
 
 	if (nv04_dac_in_use(encoder))
 		return connector_status_disconnected;
 
 	if (reliable) {
-		if (dev_priv->chipset == 0x42 ||
-		    dev_priv->chipset == 0x43)
+		if (nv_device(drm->device)->chipset == 0x42 ||
+		    nv_device(drm->device)->chipset == 0x43)
 			tv_enc->pin_mask =
 				nv42_tv_sample_load(encoder) >> 28 & 0xe;
 		else
@@ -185,7 +202,7 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 	if (!reliable) {
 		return connector_status_unknown;
 	} else if (tv_enc->subconnector) {
-		NV_INFO(dev, "Load detected on output %c\n",
+		NV_INFO(drm, "Load detected on output %c\n",
 			'@' + ffs(dcb->or));
 		return connector_status_connected;
 	} else {
@@ -357,6 +374,8 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
 static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
 	struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
 	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
 
@@ -364,7 +383,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 		return;
 	nouveau_encoder(encoder)->last_dpms = mode;
 
-	NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
+	NV_INFO(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
 		mode, nouveau_encoder(encoder)->dcb->index);
 
 	regs->ptv_200 &= ~1;
@@ -381,8 +400,8 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 
 	nv_load_ptv(dev, regs, 200);
 
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
-	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, mode == DRM_MODE_DPMS_ON);
+	gpio->set(gpio, 0, DCB_GPIO_TVDAC0, 0xff, mode == DRM_MODE_DPMS_ON);
 
 	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
 }
@@ -390,11 +409,11 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 static void nv17_tv_prepare(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
 	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
 	int head = nouveau_crtc(encoder->crtc)->index;
-	uint8_t *cr_lcd = &dev_priv->mode_reg.crtc_reg[head].CRTC[
+	uint8_t *cr_lcd = &nv04_display(dev)->mode_reg.crtc_reg[head].CRTC[
 							NV_CIO_CRE_LCD__INDEX];
 	uint32_t dacclk_off = NV_PRAMDAC_DACCLK +
 					nv04_dac_output_offset(encoder);
@@ -410,14 +429,14 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
 		struct drm_encoder *enc;
 
 		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
-			struct dcb_entry *dcb = nouveau_encoder(enc)->dcb;
+			struct dcb_output *dcb = nouveau_encoder(enc)->dcb;
 
-			if ((dcb->type == OUTPUT_TMDS ||
-			     dcb->type == OUTPUT_LVDS) &&
+			if ((dcb->type == DCB_OUTPUT_TMDS ||
+			     dcb->type == DCB_OUTPUT_LVDS) &&
 			    !enc->crtc &&
 			    nv04_dfp_get_bound_head(dev, dcb) == head) {
 				nv04_dfp_bind_head(dev, dcb, head ^ 1,
-						   dev_priv->vbios.fp.dual_link);
+						   drm->vbios.fp.dual_link);
 			}
 		}
 
@@ -429,7 +448,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
 	/* Set the DACCLK register */
 	dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
 
-	if (dev_priv->card_type == NV_40)
+	if (nv_device(drm->device)->card_type == NV_40)
 		dacclk |= 0x1a << 16;
 
 	if (tv_norm->kind == CTV_ENC_MODE) {
@@ -453,9 +472,9 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
 			     struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	int head = nouveau_crtc(encoder->crtc)->index;
-	struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
+	struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head];
 	struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state;
 	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
 	int i;
@@ -486,7 +505,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
 		tv_regs->ptv_614 = 0x13;
 	}
 
-	if (dev_priv->card_type >= NV_30) {
+	if (nv_device(drm->device)->card_type >= NV_30) {
 		tv_regs->ptv_500 = 0xe8e0;
 		tv_regs->ptv_504 = 0x1710;
 		tv_regs->ptv_604 = 0x0;
@@ -566,7 +585,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
 static void nv17_tv_commit(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
@@ -581,7 +600,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
 	nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
 
 	/* This could use refinement for flatpanels, but it should work */
-	if (dev_priv->chipset < 0x44)
+	if (nv_device(drm->device)->chipset < 0x44)
 		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
 					nv04_dac_output_offset(encoder),
 			      0xf0000000);
@@ -592,7 +611,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
 
 	helper->dpms(encoder, DRM_MODE_DPMS_ON);
 
-	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
+	NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
 		drm_get_connector_name(
 			&nouveau_encoder_connector_get(nv_encoder)->base),
 		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
@@ -630,9 +649,10 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
 				    struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_mode_config *conf = &dev->mode_config;
 	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
-	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+	struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
 	int num_tv_norms = dcb->tvconf.has_component_output ? NUM_TV_NORMS :
 							NUM_LD_TV_NORMS;
 	int i;
@@ -646,7 +666,7 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
 	}
 
 	if (i == num_tv_norms)
-		NV_WARN(dev, "Invalid TV norm setting \"%s\"\n",
+		NV_WARN(drm, "Invalid TV norm setting \"%s\"\n",
 			nouveau_tv_norm);
 	}
 
@@ -759,8 +779,6 @@ static void nv17_tv_destroy(struct drm_encoder *encoder)
 {
 	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
 
-	NV_DEBUG_KMS(encoder->dev, "\n");
-
 	drm_encoder_cleanup(encoder);
 	kfree(tv_enc);
 }
@@ -788,7 +806,7 @@ static struct drm_encoder_funcs nv17_tv_funcs = {
 };
 
 int
-nv17_tv_create(struct drm_connector *connector, struct dcb_entry *entry)
+nv17_tv_create(struct drm_connector *connector, struct dcb_output *entry)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_encoder *encoder;
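The RGB_TEST_DATA() macro used by nv42_tv_sample_load() packs three 10-bit colour channels at bit offsets 0, 10 and 20. A quick standalone check of that packing, with the test values taken from the hunk above:

#include <stdint.h>
#include <stdio.h>

#define RGB_TEST_DATA(r, g, b) ((r) << 0 | (g) << 10 | (b) << 20)

int main(void)
{
	uint32_t v = RGB_TEST_DATA(0x82, 0xeb, 0x82);

	printf("testval = 0x%08x\n", (unsigned)v);	/* prints 0x0823ac82 */
	printf("r=0x%02x g=0x%02x b=0x%02x\n",
	       (unsigned)(v & 0x3ff),
	       (unsigned)((v >> 10) & 0x3ff),
	       (unsigned)((v >> 20) & 0x3ff));
	return 0;
}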
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
index 622e72221682..7b331543a41b 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.h
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -130,12 +130,14 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
 static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
 				uint32_t val)
 {
-	nv_wr32(dev, reg, val);
+	struct nouveau_device *device = nouveau_dev(dev);
+	nv_wr32(device, reg, val);
 }
 
 static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
 {
-	return nv_rd32(dev, reg);
+	struct nouveau_device *device = nouveau_dev(dev);
+	return nv_rd32(device, reg);
 }
 
 static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
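The header change above makes the PTV accessors resolve a nouveau_device object from the drm_device before touching registers, instead of passing the DRM device straight to nv_rd32()/nv_wr32(). A toy model of that indirection, with invented stand-in types whose shape only mirrors the real ones:

#include <stdint.h>
#include <stdio.h>

struct nouveau_device { uint32_t regs[64]; };	/* stand-in register file */
struct drm_device { struct nouveau_device *dev_private; };

static struct nouveau_device *nouveau_dev(struct drm_device *dev)
{
	return dev->dev_private;
}

/* MMIO goes through the device object, not the DRM wrapper. */
static void nv_wr32(struct nouveau_device *device, uint32_t reg, uint32_t val)
{
	device->regs[reg / 4 % 64] = val;
}

static uint32_t nv_rd32(struct nouveau_device *device, uint32_t reg)
{
	return device->regs[reg / 4 % 64];
}

static void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val)
{
	struct nouveau_device *device = nouveau_dev(dev);
	nv_wr32(device, reg, val);
}

int main(void)
{
	struct nouveau_device ndev = { { 0 } };
	struct drm_device drm = { &ndev };

	nv_write_ptv(&drm, 0xd220, 0x1234);
	printf("0x%08x\n", (unsigned)nv_rd32(nouveau_dev(&drm), 0xd220));
	return 0;
}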
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
index 96e428641672..1cdfe2a5875d 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -26,7 +26,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
 #include "nouveau_hw.h"
@@ -543,10 +543,9 @@ void nv17_tv_update_rescaler(struct drm_encoder *encoder)
 void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
 	int head = nouveau_crtc(encoder->crtc)->index;
-	struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
+	struct nv04_crtc_reg *regs = &nv04_display(dev)->mode_reg.crtc_reg[head];
 	struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
 	struct drm_display_mode *output_mode =
 		&get_tv_norm(encoder)->ctv_enc_mode.mode;
diff --git a/drivers/gpu/drm/nouveau/nv20_fb.c b/drivers/gpu/drm/nouveau/nv20_fb.c
deleted file mode 100644
index 5fffc2150b8e..000000000000
--- a/drivers/gpu/drm/nouveau/nv20_fb.c
+++ /dev/null
@@ -1,147 +0,0 @@
1#include <drm/drmP.h>
2#include "nouveau_drv.h"
3#include <drm/nouveau_drm.h>
4
5static struct drm_mm_node *
6nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
7{
8 struct drm_nouveau_private *dev_priv = dev->dev_private;
9 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
10 struct drm_mm_node *mem;
11 int ret;
12
13 ret = drm_mm_pre_get(&pfb->tag_heap);
14 if (ret)
15 return NULL;
16
17 spin_lock(&dev_priv->tile.lock);
18 mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
19 if (mem)
20 mem = drm_mm_get_block_atomic(mem, size, 0);
21 spin_unlock(&dev_priv->tile.lock);
22
23 return mem;
24}
25
26static void
27nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node **pmem)
28{
29 struct drm_nouveau_private *dev_priv = dev->dev_private;
30 struct drm_mm_node *mem = *pmem;
31 if (mem) {
32 spin_lock(&dev_priv->tile.lock);
33 drm_mm_put_block(mem);
34 spin_unlock(&dev_priv->tile.lock);
35 *pmem = NULL;
36 }
37}
38
39void
40nv20_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
41 uint32_t size, uint32_t pitch, uint32_t flags)
42{
43 struct drm_nouveau_private *dev_priv = dev->dev_private;
44 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
45 int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
46
47 tile->addr = 0x00000001 | addr;
48 tile->limit = max(1u, addr + size) - 1;
49 tile->pitch = pitch;
50
51 /* Allocate some of the on-die tag memory, used to store Z
52 * compression meta-data (most likely just a bitmap determining
53 * if a given tile is compressed or not).
54 */
55 if (flags & NOUVEAU_GEM_TILE_ZETA) {
56 tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
57 if (tile->tag_mem) {
58 /* Enable Z compression */
59 tile->zcomp = tile->tag_mem->start;
60 if (dev_priv->chipset >= 0x25) {
61 if (bpp == 16)
62 tile->zcomp |= NV25_PFB_ZCOMP_MODE_16;
63 else
64 tile->zcomp |= NV25_PFB_ZCOMP_MODE_32;
65 } else {
66 tile->zcomp |= NV20_PFB_ZCOMP_EN;
67 if (bpp != 16)
68 tile->zcomp |= NV20_PFB_ZCOMP_MODE_32;
69 }
70 }
71
72 tile->addr |= 2;
73 }
74}
75
76void
77nv20_fb_free_tile_region(struct drm_device *dev, int i)
78{
79 struct drm_nouveau_private *dev_priv = dev->dev_private;
80 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
81
82 tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
83 nv20_fb_free_tag(dev, &tile->tag_mem);
84}
85
86void
87nv20_fb_set_tile_region(struct drm_device *dev, int i)
88{
89 struct drm_nouveau_private *dev_priv = dev->dev_private;
90 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
91
92 nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
93 nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
94 nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
95 nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
96}
97
98int
99nv20_fb_vram_init(struct drm_device *dev)
100{
101 struct drm_nouveau_private *dev_priv = dev->dev_private;
102 u32 mem_size = nv_rd32(dev, 0x10020c);
103 u32 pbus1218 = nv_rd32(dev, 0x001218);
104
105 dev_priv->vram_size = mem_size & 0xff000000;
106 switch (pbus1218 & 0x00000300) {
107 case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
108 case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
109 case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
110 case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_GDDR2; break;
111 }
112
113 return 0;
114}
115
116int
117nv20_fb_init(struct drm_device *dev)
118{
119 struct drm_nouveau_private *dev_priv = dev->dev_private;
120 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
121 int i;
122
123 if (dev_priv->chipset >= 0x25)
124 drm_mm_init(&pfb->tag_heap, 0, 64 * 1024);
125 else
126 drm_mm_init(&pfb->tag_heap, 0, 32 * 1024);
127
128 /* Turn all the tiling regions off. */
129 pfb->num_tiles = NV10_PFB_TILE__SIZE;
130 for (i = 0; i < pfb->num_tiles; i++)
131 pfb->set_tile_region(dev, i);
132
133 return 0;
134}
135
136void
137nv20_fb_takedown(struct drm_device *dev)
138{
139 struct drm_nouveau_private *dev_priv = dev->dev_private;
140 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
141 int i;
142
143 for (i = 0; i < pfb->num_tiles; i++)
144 pfb->free_tile_region(dev, i);
145
146 drm_mm_takedown(&pfb->tag_heap);
147}
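The deleted nv20_fb_init_tile_region() selects a Z-compression mode flag from the chipset generation and colour depth before ORing it into the tag-memory offset. The sketch below restates just that branch structure; the NV2x_PFB_ZCOMP_* values are placeholders, not the real register bits:

#include <stdint.h>
#include <stdio.h>

#define NV20_PFB_ZCOMP_EN	0x01000000u	/* placeholder value */
#define NV20_PFB_ZCOMP_MODE_32	0x02000000u	/* placeholder value */
#define NV25_PFB_ZCOMP_MODE_16	0x04000000u	/* placeholder value */
#define NV25_PFB_ZCOMP_MODE_32	0x08000000u	/* placeholder value */

/* Branch structure from the deleted file: NV25+ picks a 16/32 bpp mode,
 * older NV20 always sets the enable bit plus a 32 bpp mode flag. */
static uint32_t pick_zcomp(int chipset, int bpp, uint32_t tag_start)
{
	uint32_t zcomp = tag_start;

	if (chipset >= 0x25) {
		if (bpp == 16)
			zcomp |= NV25_PFB_ZCOMP_MODE_16;
		else
			zcomp |= NV25_PFB_ZCOMP_MODE_32;
	} else {
		zcomp |= NV20_PFB_ZCOMP_EN;
		if (bpp != 16)
			zcomp |= NV20_PFB_ZCOMP_MODE_32;
	}
	return zcomp;
}

int main(void)
{
	printf("nv20/32bpp: 0x%08x\n", (unsigned)pick_zcomp(0x20, 32, 0x100));
	printf("nv25/16bpp: 0x%08x\n", (unsigned)pick_zcomp(0x25, 16, 0x100));
	return 0;
}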
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
deleted file mode 100644
index ffaab0ba76b9..000000000000
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ /dev/null
@@ -1,835 +0,0 @@
1#include <drm/drmP.h>
2#include "nouveau_drv.h"
3#include <drm/nouveau_drm.h>
4
5/*
6 * NV20
7 * -----
8 * There are 3 families:
9 * NV20 is 0x10de:0x020*
10 * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
11 * NV2A is 0x10de:0x02A0
12 *
13 * NV30
14 * -----
15 * There are 3 families:
16 * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
17 * NV34 is 0x10de:0x032*
18 * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
19 *
20 * Not seen in the wild, no dumps (probably NV35):
21 * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
22 * NV38 is 0x10de:0x0333, 0x10de:0x00fe
23 *
24 */
25
26struct nv20_graph_engine {
27 struct nouveau_exec_engine base;
28 struct nouveau_gpuobj *ctxtab;
29 void (*grctx_init)(struct nouveau_gpuobj *);
30 u32 grctx_size;
31 u32 grctx_user;
32};
33
34#define NV20_GRCTX_SIZE (3580*4)
35#define NV25_GRCTX_SIZE (3529*4)
36#define NV2A_GRCTX_SIZE (3500*4)
37
38#define NV30_31_GRCTX_SIZE (24392)
39#define NV34_GRCTX_SIZE (18140)
40#define NV35_36_GRCTX_SIZE (22396)
41
42int
43nv20_graph_unload_context(struct drm_device *dev)
44{
45 struct nouveau_channel *chan;
46 struct nouveau_gpuobj *grctx;
47 u32 tmp;
48
49 chan = nv10_graph_channel(dev);
50 if (!chan)
51 return 0;
52 grctx = chan->engctx[NVOBJ_ENGINE_GR];
53
54 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->pinst >> 4);
55 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
56 NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
57
58 nouveau_wait_for_idle(dev);
59
60 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
61 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
62 tmp |= 31 << 24;
63 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
64 return 0;
65}
66
67static void
68nv20_graph_rdi(struct drm_device *dev)
69{
70 struct drm_nouveau_private *dev_priv = dev->dev_private;
71 int i, writecount = 32;
72 uint32_t rdi_index = 0x2c80000;
73
74 if (dev_priv->chipset == 0x20) {
75 rdi_index = 0x3d0000;
76 writecount = 15;
77 }
78
79 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
80 for (i = 0; i < writecount; i++)
81 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
82
83 nouveau_wait_for_idle(dev);
84}
85
86static void
87nv20_graph_context_init(struct nouveau_gpuobj *ctx)
88{
89 int i;
90
91 nv_wo32(ctx, 0x033c, 0xffff0000);
92 nv_wo32(ctx, 0x03a0, 0x0fff0000);
93 nv_wo32(ctx, 0x03a4, 0x0fff0000);
94 nv_wo32(ctx, 0x047c, 0x00000101);
95 nv_wo32(ctx, 0x0490, 0x00000111);
96 nv_wo32(ctx, 0x04a8, 0x44400000);
97 for (i = 0x04d4; i <= 0x04e0; i += 4)
98 nv_wo32(ctx, i, 0x00030303);
99 for (i = 0x04f4; i <= 0x0500; i += 4)
100 nv_wo32(ctx, i, 0x00080000);
101 for (i = 0x050c; i <= 0x0518; i += 4)
102 nv_wo32(ctx, i, 0x01012000);
103 for (i = 0x051c; i <= 0x0528; i += 4)
104 nv_wo32(ctx, i, 0x000105b8);
105 for (i = 0x052c; i <= 0x0538; i += 4)
106 nv_wo32(ctx, i, 0x00080008);
107 for (i = 0x055c; i <= 0x0598; i += 4)
108 nv_wo32(ctx, i, 0x07ff0000);
109 nv_wo32(ctx, 0x05a4, 0x4b7fffff);
110 nv_wo32(ctx, 0x05fc, 0x00000001);
111 nv_wo32(ctx, 0x0604, 0x00004000);
112 nv_wo32(ctx, 0x0610, 0x00000001);
113 nv_wo32(ctx, 0x0618, 0x00040000);
114 nv_wo32(ctx, 0x061c, 0x00010000);
115 for (i = 0x1c1c; i <= 0x248c; i += 16) {
116 nv_wo32(ctx, (i + 0), 0x10700ff9);
117 nv_wo32(ctx, (i + 4), 0x0436086c);
118 nv_wo32(ctx, (i + 8), 0x000c001b);
119 }
120 nv_wo32(ctx, 0x281c, 0x3f800000);
121 nv_wo32(ctx, 0x2830, 0x3f800000);
122 nv_wo32(ctx, 0x285c, 0x40000000);
123 nv_wo32(ctx, 0x2860, 0x3f800000);
124 nv_wo32(ctx, 0x2864, 0x3f000000);
125 nv_wo32(ctx, 0x286c, 0x40000000);
126 nv_wo32(ctx, 0x2870, 0x3f800000);
127 nv_wo32(ctx, 0x2878, 0xbf800000);
128 nv_wo32(ctx, 0x2880, 0xbf800000);
129 nv_wo32(ctx, 0x34a4, 0x000fe000);
130 nv_wo32(ctx, 0x3530, 0x000003f8);
131 nv_wo32(ctx, 0x3540, 0x002fe000);
132 for (i = 0x355c; i <= 0x3578; i += 4)
133 nv_wo32(ctx, i, 0x001c527c);
134}
135
136static void
137nv25_graph_context_init(struct nouveau_gpuobj *ctx)
138{
139 int i;
140
141 nv_wo32(ctx, 0x035c, 0xffff0000);
142 nv_wo32(ctx, 0x03c0, 0x0fff0000);
143 nv_wo32(ctx, 0x03c4, 0x0fff0000);
144 nv_wo32(ctx, 0x049c, 0x00000101);
145 nv_wo32(ctx, 0x04b0, 0x00000111);
146 nv_wo32(ctx, 0x04c8, 0x00000080);
147 nv_wo32(ctx, 0x04cc, 0xffff0000);
148 nv_wo32(ctx, 0x04d0, 0x00000001);
149 nv_wo32(ctx, 0x04e4, 0x44400000);
150 nv_wo32(ctx, 0x04fc, 0x4b800000);
151 for (i = 0x0510; i <= 0x051c; i += 4)
152 nv_wo32(ctx, i, 0x00030303);
153 for (i = 0x0530; i <= 0x053c; i += 4)
154 nv_wo32(ctx, i, 0x00080000);
155 for (i = 0x0548; i <= 0x0554; i += 4)
156 nv_wo32(ctx, i, 0x01012000);
157 for (i = 0x0558; i <= 0x0564; i += 4)
158 nv_wo32(ctx, i, 0x000105b8);
159 for (i = 0x0568; i <= 0x0574; i += 4)
160 nv_wo32(ctx, i, 0x00080008);
161 for (i = 0x0598; i <= 0x05d4; i += 4)
162 nv_wo32(ctx, i, 0x07ff0000);
163 nv_wo32(ctx, 0x05e0, 0x4b7fffff);
164 nv_wo32(ctx, 0x0620, 0x00000080);
165 nv_wo32(ctx, 0x0624, 0x30201000);
166 nv_wo32(ctx, 0x0628, 0x70605040);
167 nv_wo32(ctx, 0x062c, 0xb0a09080);
168 nv_wo32(ctx, 0x0630, 0xf0e0d0c0);
169 nv_wo32(ctx, 0x0664, 0x00000001);
170 nv_wo32(ctx, 0x066c, 0x00004000);
171 nv_wo32(ctx, 0x0678, 0x00000001);
172 nv_wo32(ctx, 0x0680, 0x00040000);
173 nv_wo32(ctx, 0x0684, 0x00010000);
174 for (i = 0x1b04; i <= 0x2374; i += 16) {
175 nv_wo32(ctx, (i + 0), 0x10700ff9);
176 nv_wo32(ctx, (i + 4), 0x0436086c);
177 nv_wo32(ctx, (i + 8), 0x000c001b);
178 }
179 nv_wo32(ctx, 0x2704, 0x3f800000);
180 nv_wo32(ctx, 0x2718, 0x3f800000);
181 nv_wo32(ctx, 0x2744, 0x40000000);
182 nv_wo32(ctx, 0x2748, 0x3f800000);
183 nv_wo32(ctx, 0x274c, 0x3f000000);
184 nv_wo32(ctx, 0x2754, 0x40000000);
185 nv_wo32(ctx, 0x2758, 0x3f800000);
186 nv_wo32(ctx, 0x2760, 0xbf800000);
187 nv_wo32(ctx, 0x2768, 0xbf800000);
188 nv_wo32(ctx, 0x308c, 0x000fe000);
189 nv_wo32(ctx, 0x3108, 0x000003f8);
190 nv_wo32(ctx, 0x3468, 0x002fe000);
191 for (i = 0x3484; i <= 0x34a0; i += 4)
192 nv_wo32(ctx, i, 0x001c527c);
193}
194
195static void
196nv2a_graph_context_init(struct nouveau_gpuobj *ctx)
197{
198 int i;
199
200 nv_wo32(ctx, 0x033c, 0xffff0000);
201 nv_wo32(ctx, 0x03a0, 0x0fff0000);
202 nv_wo32(ctx, 0x03a4, 0x0fff0000);
203 nv_wo32(ctx, 0x047c, 0x00000101);
204 nv_wo32(ctx, 0x0490, 0x00000111);
205 nv_wo32(ctx, 0x04a8, 0x44400000);
206 for (i = 0x04d4; i <= 0x04e0; i += 4)
207 nv_wo32(ctx, i, 0x00030303);
208 for (i = 0x04f4; i <= 0x0500; i += 4)
209 nv_wo32(ctx, i, 0x00080000);
210 for (i = 0x050c; i <= 0x0518; i += 4)
211 nv_wo32(ctx, i, 0x01012000);
212 for (i = 0x051c; i <= 0x0528; i += 4)
213 nv_wo32(ctx, i, 0x000105b8);
214 for (i = 0x052c; i <= 0x0538; i += 4)
215 nv_wo32(ctx, i, 0x00080008);
216 for (i = 0x055c; i <= 0x0598; i += 4)
217 nv_wo32(ctx, i, 0x07ff0000);
218 nv_wo32(ctx, 0x05a4, 0x4b7fffff);
219 nv_wo32(ctx, 0x05fc, 0x00000001);
220 nv_wo32(ctx, 0x0604, 0x00004000);
221 nv_wo32(ctx, 0x0610, 0x00000001);
222 nv_wo32(ctx, 0x0618, 0x00040000);
223 nv_wo32(ctx, 0x061c, 0x00010000);
224 for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
225 nv_wo32(ctx, (i + 0), 0x10700ff9);
226 nv_wo32(ctx, (i + 4), 0x0436086c);
227 nv_wo32(ctx, (i + 8), 0x000c001b);
228 }
229 nv_wo32(ctx, 0x269c, 0x3f800000);
230 nv_wo32(ctx, 0x26b0, 0x3f800000);
231 nv_wo32(ctx, 0x26dc, 0x40000000);
232 nv_wo32(ctx, 0x26e0, 0x3f800000);
233 nv_wo32(ctx, 0x26e4, 0x3f000000);
234 nv_wo32(ctx, 0x26ec, 0x40000000);
235 nv_wo32(ctx, 0x26f0, 0x3f800000);
236 nv_wo32(ctx, 0x26f8, 0xbf800000);
237 nv_wo32(ctx, 0x2700, 0xbf800000);
238 nv_wo32(ctx, 0x3024, 0x000fe000);
239 nv_wo32(ctx, 0x30a0, 0x000003f8);
240 nv_wo32(ctx, 0x33fc, 0x002fe000);
241 for (i = 0x341c; i <= 0x3438; i += 4)
242 nv_wo32(ctx, i, 0x001c527c);
243}
244
245static void
246nv30_31_graph_context_init(struct nouveau_gpuobj *ctx)
247{
248 int i;
249
250 nv_wo32(ctx, 0x0410, 0x00000101);
251 nv_wo32(ctx, 0x0424, 0x00000111);
252 nv_wo32(ctx, 0x0428, 0x00000060);
253 nv_wo32(ctx, 0x0444, 0x00000080);
254 nv_wo32(ctx, 0x0448, 0xffff0000);
255 nv_wo32(ctx, 0x044c, 0x00000001);
256 nv_wo32(ctx, 0x0460, 0x44400000);
257 nv_wo32(ctx, 0x048c, 0xffff0000);
258 for (i = 0x04e0; i < 0x04e8; i += 4)
259 nv_wo32(ctx, i, 0x0fff0000);
260 nv_wo32(ctx, 0x04ec, 0x00011100);
261 for (i = 0x0508; i < 0x0548; i += 4)
262 nv_wo32(ctx, i, 0x07ff0000);
263 nv_wo32(ctx, 0x0550, 0x4b7fffff);
264 nv_wo32(ctx, 0x058c, 0x00000080);
265 nv_wo32(ctx, 0x0590, 0x30201000);
266 nv_wo32(ctx, 0x0594, 0x70605040);
267 nv_wo32(ctx, 0x0598, 0xb8a89888);
268 nv_wo32(ctx, 0x059c, 0xf8e8d8c8);
269 nv_wo32(ctx, 0x05b0, 0xb0000000);
270 for (i = 0x0600; i < 0x0640; i += 4)
271 nv_wo32(ctx, i, 0x00010588);
272 for (i = 0x0640; i < 0x0680; i += 4)
273 nv_wo32(ctx, i, 0x00030303);
274 for (i = 0x06c0; i < 0x0700; i += 4)
275 nv_wo32(ctx, i, 0x0008aae4);
276 for (i = 0x0700; i < 0x0740; i += 4)
277 nv_wo32(ctx, i, 0x01012000);
278 for (i = 0x0740; i < 0x0780; i += 4)
279 nv_wo32(ctx, i, 0x00080008);
280 nv_wo32(ctx, 0x085c, 0x00040000);
281 nv_wo32(ctx, 0x0860, 0x00010000);
282 for (i = 0x0864; i < 0x0874; i += 4)
283 nv_wo32(ctx, i, 0x00040004);
284 for (i = 0x1f18; i <= 0x3088 ; i += 16) {
285 nv_wo32(ctx, i + 0, 0x10700ff9);
286 nv_wo32(ctx, i + 1, 0x0436086c);
287 nv_wo32(ctx, i + 2, 0x000c001b);
288 }
289 for (i = 0x30b8; i < 0x30c8; i += 4)
290 nv_wo32(ctx, i, 0x0000ffff);
291 nv_wo32(ctx, 0x344c, 0x3f800000);
292 nv_wo32(ctx, 0x3808, 0x3f800000);
293 nv_wo32(ctx, 0x381c, 0x3f800000);
294 nv_wo32(ctx, 0x3848, 0x40000000);
295 nv_wo32(ctx, 0x384c, 0x3f800000);
296 nv_wo32(ctx, 0x3850, 0x3f000000);
297 nv_wo32(ctx, 0x3858, 0x40000000);
298 nv_wo32(ctx, 0x385c, 0x3f800000);
299 nv_wo32(ctx, 0x3864, 0xbf800000);
300 nv_wo32(ctx, 0x386c, 0xbf800000);
301}
302
303static void
304nv34_graph_context_init(struct nouveau_gpuobj *ctx)
305{
306 int i;
307
308 nv_wo32(ctx, 0x040c, 0x01000101);
309 nv_wo32(ctx, 0x0420, 0x00000111);
310 nv_wo32(ctx, 0x0424, 0x00000060);
311 nv_wo32(ctx, 0x0440, 0x00000080);
312 nv_wo32(ctx, 0x0444, 0xffff0000);
313 nv_wo32(ctx, 0x0448, 0x00000001);
314 nv_wo32(ctx, 0x045c, 0x44400000);
315 nv_wo32(ctx, 0x0480, 0xffff0000);
316 for (i = 0x04d4; i < 0x04dc; i += 4)
317 nv_wo32(ctx, i, 0x0fff0000);
318 nv_wo32(ctx, 0x04e0, 0x00011100);
319 for (i = 0x04fc; i < 0x053c; i += 4)
320 nv_wo32(ctx, i, 0x07ff0000);
321 nv_wo32(ctx, 0x0544, 0x4b7fffff);
322 nv_wo32(ctx, 0x057c, 0x00000080);
323 nv_wo32(ctx, 0x0580, 0x30201000);
324 nv_wo32(ctx, 0x0584, 0x70605040);
325 nv_wo32(ctx, 0x0588, 0xb8a89888);
326 nv_wo32(ctx, 0x058c, 0xf8e8d8c8);
327 nv_wo32(ctx, 0x05a0, 0xb0000000);
328 for (i = 0x05f0; i < 0x0630; i += 4)
329 nv_wo32(ctx, i, 0x00010588);
330 for (i = 0x0630; i < 0x0670; i += 4)
331 nv_wo32(ctx, i, 0x00030303);
332 for (i = 0x06b0; i < 0x06f0; i += 4)
333 nv_wo32(ctx, i, 0x0008aae4);
334 for (i = 0x06f0; i < 0x0730; i += 4)
335 nv_wo32(ctx, i, 0x01012000);
336 for (i = 0x0730; i < 0x0770; i += 4)
337 nv_wo32(ctx, i, 0x00080008);
338 nv_wo32(ctx, 0x0850, 0x00040000);
339 nv_wo32(ctx, 0x0854, 0x00010000);
340 for (i = 0x0858; i < 0x0868; i += 4)
341 nv_wo32(ctx, i, 0x00040004);
342 for (i = 0x15ac; i <= 0x271c ; i += 16) {
343 nv_wo32(ctx, i + 0, 0x10700ff9);
344 nv_wo32(ctx, i + 1, 0x0436086c);
345 nv_wo32(ctx, i + 2, 0x000c001b);
346 }
347 for (i = 0x274c; i < 0x275c; i += 4)
348 nv_wo32(ctx, i, 0x0000ffff);
349 nv_wo32(ctx, 0x2ae0, 0x3f800000);
350 nv_wo32(ctx, 0x2e9c, 0x3f800000);
351 nv_wo32(ctx, 0x2eb0, 0x3f800000);
352 nv_wo32(ctx, 0x2edc, 0x40000000);
353 nv_wo32(ctx, 0x2ee0, 0x3f800000);
354 nv_wo32(ctx, 0x2ee4, 0x3f000000);
355 nv_wo32(ctx, 0x2eec, 0x40000000);
356 nv_wo32(ctx, 0x2ef0, 0x3f800000);
357 nv_wo32(ctx, 0x2ef8, 0xbf800000);
358 nv_wo32(ctx, 0x2f00, 0xbf800000);
359}
360
361static void
362nv35_36_graph_context_init(struct nouveau_gpuobj *ctx)
363{
364 int i;
365
366 nv_wo32(ctx, 0x040c, 0x00000101);
367 nv_wo32(ctx, 0x0420, 0x00000111);
368 nv_wo32(ctx, 0x0424, 0x00000060);
369 nv_wo32(ctx, 0x0440, 0x00000080);
370 nv_wo32(ctx, 0x0444, 0xffff0000);
371 nv_wo32(ctx, 0x0448, 0x00000001);
372 nv_wo32(ctx, 0x045c, 0x44400000);
373 nv_wo32(ctx, 0x0488, 0xffff0000);
374 for (i = 0x04dc; i < 0x04e4; i += 4)
375 nv_wo32(ctx, i, 0x0fff0000);
376 nv_wo32(ctx, 0x04e8, 0x00011100);
377 for (i = 0x0504; i < 0x0544; i += 4)
378 nv_wo32(ctx, i, 0x07ff0000);
379 nv_wo32(ctx, 0x054c, 0x4b7fffff);
380 nv_wo32(ctx, 0x0588, 0x00000080);
381 nv_wo32(ctx, 0x058c, 0x30201000);
382 nv_wo32(ctx, 0x0590, 0x70605040);
383 nv_wo32(ctx, 0x0594, 0xb8a89888);
384 nv_wo32(ctx, 0x0598, 0xf8e8d8c8);
385 nv_wo32(ctx, 0x05ac, 0xb0000000);
386 for (i = 0x0604; i < 0x0644; i += 4)
387 nv_wo32(ctx, i, 0x00010588);
388 for (i = 0x0644; i < 0x0684; i += 4)
389 nv_wo32(ctx, i, 0x00030303);
390 for (i = 0x06c4; i < 0x0704; i += 4)
391 nv_wo32(ctx, i, 0x0008aae4);
392 for (i = 0x0704; i < 0x0744; i += 4)
393 nv_wo32(ctx, i, 0x01012000);
394 for (i = 0x0744; i < 0x0784; i += 4)
395 nv_wo32(ctx, i, 0x00080008);
396 nv_wo32(ctx, 0x0860, 0x00040000);
397 nv_wo32(ctx, 0x0864, 0x00010000);
398 for (i = 0x0868; i < 0x0878; i += 4)
399 nv_wo32(ctx, i, 0x00040004);
400 for (i = 0x1f1c; i <= 0x308c ; i += 16) {
401 nv_wo32(ctx, i + 0, 0x10700ff9);
402 nv_wo32(ctx, i + 4, 0x0436086c);
403 nv_wo32(ctx, i + 8, 0x000c001b);
404 }
405 for (i = 0x30bc; i < 0x30cc; i += 4)
406 nv_wo32(ctx, i, 0x0000ffff);
407 nv_wo32(ctx, 0x3450, 0x3f800000);
408 nv_wo32(ctx, 0x380c, 0x3f800000);
409 nv_wo32(ctx, 0x3820, 0x3f800000);
410 nv_wo32(ctx, 0x384c, 0x40000000);
411 nv_wo32(ctx, 0x3850, 0x3f800000);
412 nv_wo32(ctx, 0x3854, 0x3f000000);
413 nv_wo32(ctx, 0x385c, 0x40000000);
414 nv_wo32(ctx, 0x3860, 0x3f800000);
415 nv_wo32(ctx, 0x3868, 0xbf800000);
416 nv_wo32(ctx, 0x3870, 0xbf800000);
417}
418
419int
420nv20_graph_context_new(struct nouveau_channel *chan, int engine)
421{
422 struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
423 struct nouveau_gpuobj *grctx = NULL;
424 struct drm_device *dev = chan->dev;
425 int ret;
426
427 ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
428 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
429 if (ret)
430 return ret;
431
432 /* Initialise default context values */
433 pgraph->grctx_init(grctx);
434
435 /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
436 /* CTX_USER */
437 nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1);
438
439 nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->pinst >> 4);
440 chan->engctx[engine] = grctx;
441 return 0;
442}
443
444void
445nv20_graph_context_del(struct nouveau_channel *chan, int engine)
446{
447 struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
448 struct nouveau_gpuobj *grctx = chan->engctx[engine];
449 struct drm_device *dev = chan->dev;
450 struct drm_nouveau_private *dev_priv = dev->dev_private;
451 unsigned long flags;
452
453 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
454 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
455
456 /* Unload the context if it's the currently active one */
457 if (nv10_graph_channel(dev) == chan)
458 nv20_graph_unload_context(dev);
459
460 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
461 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
462
463 /* Free the context resources */
464 nv_wo32(pgraph->ctxtab, chan->id * 4, 0);
465
466 nouveau_gpuobj_ref(NULL, &grctx);
467 chan->engctx[engine] = NULL;
468}
469
470static void
471nv20_graph_set_tile_region(struct drm_device *dev, int i)
472{
473 struct drm_nouveau_private *dev_priv = dev->dev_private;
474 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
475
476 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
477 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
478 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
479
480 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
481 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
482 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
483 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
484 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
485 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
486
487 if (dev_priv->card_type == NV_20) {
488 nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
489 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
490 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
491 }
492}
493
494int
495nv20_graph_init(struct drm_device *dev, int engine)
496{
497 struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
498 struct drm_nouveau_private *dev_priv = dev->dev_private;
499 uint32_t tmp, vramsz;
500 int i;
501
502 nv_wr32(dev, NV03_PMC_ENABLE,
503 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
504 nv_wr32(dev, NV03_PMC_ENABLE,
505 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
506
507 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);
508
509 nv20_graph_rdi(dev);
510
511 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
512 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
513
514 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
515 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
516 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
517 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
518 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
519 nv_wr32(dev, 0x40009C , 0x00000040);
520
521 if (dev_priv->chipset >= 0x25) {
522 nv_wr32(dev, 0x400890, 0x00a8cfff);
523 nv_wr32(dev, 0x400610, 0x304B1FB6);
524 nv_wr32(dev, 0x400B80, 0x1cbd3883);
525 nv_wr32(dev, 0x400B84, 0x44000000);
526 nv_wr32(dev, 0x400098, 0x40000080);
527 nv_wr32(dev, 0x400B88, 0x000000ff);
528
529 } else {
530 nv_wr32(dev, 0x400880, 0x0008c7df);
531 nv_wr32(dev, 0x400094, 0x00000005);
532 nv_wr32(dev, 0x400B80, 0x45eae20e);
533 nv_wr32(dev, 0x400B84, 0x24000000);
534 nv_wr32(dev, 0x400098, 0x00000040);
535 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
536 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
537 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
538 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
539 }
540
541 /* Turn all the tiling regions off. */
542 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
543 nv20_graph_set_tile_region(dev, i);
544
545 nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
546 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
547 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
548
549 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
550 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
551
552 tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
553 nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
554 tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
555 nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
556
557 /* begin RAM config */
558 vramsz = pci_resource_len(dev->pdev, 0) - 1;
559 nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
560 nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
561 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
562 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
563 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
564 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
565 nv_wr32(dev, 0x400820, 0);
566 nv_wr32(dev, 0x400824, 0);
567 nv_wr32(dev, 0x400864, vramsz - 1);
568 nv_wr32(dev, 0x400868, vramsz - 1);
569
570 /* interesting.. the below overwrites some of the tile setup above.. */
571 nv_wr32(dev, 0x400B20, 0x00000000);
572 nv_wr32(dev, 0x400B04, 0xFFFFFFFF);
573
574 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
575 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
576 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
577 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
578
579 return 0;
580}
581
582int
583nv30_graph_init(struct drm_device *dev, int engine)
584{
585 struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
586 struct drm_nouveau_private *dev_priv = dev->dev_private;
587 int i;
588
589 nv_wr32(dev, NV03_PMC_ENABLE,
590 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
591 nv_wr32(dev, NV03_PMC_ENABLE,
592 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
593
594 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);
595
596 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
597 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
598
599 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
600 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
601 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
602 nv_wr32(dev, 0x400890, 0x01b463ff);
603 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
604 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
605 nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
606 nv_wr32(dev, 0x400B80, 0x1003d888);
607 nv_wr32(dev, 0x400B84, 0x0c000000);
608 nv_wr32(dev, 0x400098, 0x00000000);
609 nv_wr32(dev, 0x40009C, 0x0005ad00);
610 nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
611 nv_wr32(dev, 0x4000a0, 0x00000000);
612 nv_wr32(dev, 0x4000a4, 0x00000008);
613 nv_wr32(dev, 0x4008a8, 0xb784a400);
614 nv_wr32(dev, 0x400ba0, 0x002f8685);
615 nv_wr32(dev, 0x400ba4, 0x00231f3f);
616 nv_wr32(dev, 0x4008a4, 0x40000020);
617
618 if (dev_priv->chipset == 0x34) {
619 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
620 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
621 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
622 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
623 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
624 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
625 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
626 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
627 }
628
629 nv_wr32(dev, 0x4000c0, 0x00000016);
630
631 /* Turn all the tiling regions off. */
632 for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
633 nv20_graph_set_tile_region(dev, i);
634
635 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
636 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
637 nv_wr32(dev, 0x0040075c , 0x00000001);
638
639 /* begin RAM config */
640 /* vramsz = pci_resource_len(dev->pdev, 0) - 1; */
641 nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
642 nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
643 if (dev_priv->chipset != 0x34) {
644 nv_wr32(dev, 0x400750, 0x00EA0000);
645 nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
646 nv_wr32(dev, 0x400750, 0x00EA0004);
647 nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
648 }
649
650 return 0;
651}
652
653int
654nv20_graph_fini(struct drm_device *dev, int engine, bool suspend)
655{
656 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
657 if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
658 nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
659 return -EBUSY;
660 }
661 nv20_graph_unload_context(dev);
662 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
663 return 0;
664}
665
666static void
667nv20_graph_isr(struct drm_device *dev)
668{
669 u32 stat;
670
671 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
672 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
673 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
674 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
675 u32 chid = (addr & 0x01f00000) >> 20;
676 u32 subc = (addr & 0x00070000) >> 16;
677 u32 mthd = (addr & 0x00001ffc);
678 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
679 u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
680 u32 show = stat;
681
682 if (stat & NV_PGRAPH_INTR_ERROR) {
683 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
684 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
685 show &= ~NV_PGRAPH_INTR_ERROR;
686 }
687 }
688
689 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
690 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
691
692 if (show && nouveau_ratelimit()) {
693 NV_INFO(dev, "PGRAPH -");
694 nouveau_bitfield_print(nv10_graph_intr, show);
695 printk(" nsource:");
696 nouveau_bitfield_print(nv04_graph_nsource, nsource);
697 printk(" nstatus:");
698 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
699 printk("\n");
700 NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
701 "mthd 0x%04x data 0x%08x\n",
702 chid, subc, class, mthd, data);
703 }
704 }
705}
706
707static void
708nv20_graph_destroy(struct drm_device *dev, int engine)
709{
710 struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
711
712 nouveau_irq_unregister(dev, 12);
713 nouveau_gpuobj_ref(NULL, &pgraph->ctxtab);
714
715 NVOBJ_ENGINE_DEL(dev, GR);
716 kfree(pgraph);
717}
718
719int
720nv20_graph_create(struct drm_device *dev)
721{
722 struct drm_nouveau_private *dev_priv = dev->dev_private;
723 struct nv20_graph_engine *pgraph;
724 int ret;
725
726 pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
727 if (!pgraph)
728 return -ENOMEM;
729
730 pgraph->base.destroy = nv20_graph_destroy;
731 pgraph->base.fini = nv20_graph_fini;
732 pgraph->base.context_new = nv20_graph_context_new;
733 pgraph->base.context_del = nv20_graph_context_del;
734 pgraph->base.object_new = nv04_graph_object_new;
735 pgraph->base.set_tile_region = nv20_graph_set_tile_region;
736
737 pgraph->grctx_user = 0x0028;
738 if (dev_priv->card_type == NV_20) {
739 pgraph->base.init = nv20_graph_init;
740 switch (dev_priv->chipset) {
741 case 0x20:
742 pgraph->grctx_init = nv20_graph_context_init;
743 pgraph->grctx_size = NV20_GRCTX_SIZE;
744 pgraph->grctx_user = 0x0000;
745 break;
746 case 0x25:
747 case 0x28:
748 pgraph->grctx_init = nv25_graph_context_init;
749 pgraph->grctx_size = NV25_GRCTX_SIZE;
750 break;
751 case 0x2a:
752 pgraph->grctx_init = nv2a_graph_context_init;
753 pgraph->grctx_size = NV2A_GRCTX_SIZE;
754 pgraph->grctx_user = 0x0000;
755 break;
756 default:
757 NV_ERROR(dev, "PGRAPH: unknown chipset\n");
758 kfree(pgraph);
759 return 0;
760 }
761 } else {
762 pgraph->base.init = nv30_graph_init;
763 switch (dev_priv->chipset) {
764 case 0x30:
765 case 0x31:
766 pgraph->grctx_init = nv30_31_graph_context_init;
767 pgraph->grctx_size = NV30_31_GRCTX_SIZE;
768 break;
769 case 0x34:
770 pgraph->grctx_init = nv34_graph_context_init;
771 pgraph->grctx_size = NV34_GRCTX_SIZE;
772 break;
773 case 0x35:
774 case 0x36:
775 pgraph->grctx_init = nv35_36_graph_context_init;
776 pgraph->grctx_size = NV35_36_GRCTX_SIZE;
777 break;
778 default:
779 NV_ERROR(dev, "PGRAPH: unknown chipset\n");
780 kfree(pgraph);
781 return 0;
782 }
783 }
784
785 /* Create Context Pointer Table */
786 ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC,
787 &pgraph->ctxtab);
788 if (ret) {
789 kfree(pgraph);
790 return ret;
791 }
792
793 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
794 nouveau_irq_register(dev, 12, nv20_graph_isr);
795
796 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
797 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
798 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
799 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
800 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
801 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
802 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
803 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
804 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
805 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
806 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
807 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
808 if (dev_priv->card_type == NV_20) {
809 NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
810	NVOBJ_CLASS(dev, 0x0096, GR); /* celsius */
811
812 /* kelvin */
813 if (dev_priv->chipset < 0x25)
814 NVOBJ_CLASS(dev, 0x0097, GR);
815 else
816 NVOBJ_CLASS(dev, 0x0597, GR);
817 } else {
818 NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
819 NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
820 NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
821 NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
822
823 /* rankine */
824 if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
825 NVOBJ_CLASS(dev, 0x0397, GR);
826 else
827 if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
828 NVOBJ_CLASS(dev, 0x0697, GR);
829 else
830 if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
831 NVOBJ_CLASS(dev, 0x0497, GR);
832 }
833
834 return 0;
835}
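The deleted nv20_graph_create() picks the NV3x "rankine" 3D class by testing a bit derived from the low nibble of the chipset id against fixed masks. A standalone restatement, using the exact masks and class numbers from the file above:

#include <stdio.h>

static int rankine_class(int chipset)
{
	int bit = 1 << (chipset & 0x0f);

	if (0x00000003 & bit)		/* nv30, nv31 */
		return 0x0397;
	if (0x00000010 & bit)		/* nv34 */
		return 0x0697;
	if (0x000001e0 & bit)		/* nv35, nv36 */
		return 0x0497;
	return 0;			/* no rankine class registered */
}

int main(void)
{
	int chipsets[] = { 0x30, 0x31, 0x34, 0x35, 0x36 };
	unsigned i;

	for (i = 0; i < sizeof(chipsets) / sizeof(*chipsets); i++)
		printf("nv%02x -> class 0x%04x\n",
		       chipsets[i], rankine_class(chipsets[i]));
	return 0;
}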
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c
deleted file mode 100644
index 9cc4de8de5ca..000000000000
--- a/drivers/gpu/drm/nouveau/nv30_fb.c
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include "nouveau_drv.h"
29#include <drm/nouveau_drm.h>
30
31void
32nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
33 uint32_t size, uint32_t pitch, uint32_t flags)
34{
35 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
37
38 tile->addr = addr | 1;
39 tile->limit = max(1u, addr + size) - 1;
40 tile->pitch = pitch;
41}
42
43void
44nv30_fb_free_tile_region(struct drm_device *dev, int i)
45{
46 struct drm_nouveau_private *dev_priv = dev->dev_private;
47 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
48
49 tile->addr = tile->limit = tile->pitch = 0;
50}
51
52static int
53calc_bias(struct drm_device *dev, int k, int i, int j)
54{
55 struct drm_nouveau_private *dev_priv = dev->dev_private;
56 int b = (dev_priv->chipset > 0x30 ?
57 nv_rd32(dev, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
58 0) & 0xf;
59
60 return 2 * (b & 0x8 ? b - 0x10 : b);
61}
62
63static int
64calc_ref(struct drm_device *dev, int l, int k, int i)
65{
66 int j, x = 0;
67
68 for (j = 0; j < 4; j++) {
69 int m = (l >> (8 * i) & 0xff) + calc_bias(dev, k, i, j);
70
71 x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
72 }
73
74 return x;
75}
76
77int
78nv30_fb_init(struct drm_device *dev)
79{
80 struct drm_nouveau_private *dev_priv = dev->dev_private;
81 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
82 int i, j;
83
84 pfb->num_tiles = NV10_PFB_TILE__SIZE;
85
86 /* Turn all the tiling regions off. */
87 for (i = 0; i < pfb->num_tiles; i++)
88 pfb->set_tile_region(dev, i);
89
90 /* Init the memory timing regs at 0x10037c/0x1003ac */
91 if (dev_priv->chipset == 0x30 ||
92 dev_priv->chipset == 0x31 ||
93 dev_priv->chipset == 0x35) {
94 /* Related to ROP count */
95 int n = (dev_priv->chipset == 0x31 ? 2 : 4);
96 int l = nv_rd32(dev, 0x1003d0);
97
98 for (i = 0; i < n; i++) {
99 for (j = 0; j < 3; j++)
100 nv_wr32(dev, 0x10037c + 0xc * i + 0x4 * j,
101 calc_ref(dev, l, 0, j));
102
103 for (j = 0; j < 2; j++)
104 nv_wr32(dev, 0x1003ac + 0x8 * i + 0x4 * j,
105 calc_ref(dev, l, 1, j));
106 }
107 }
108
109 return 0;
110}
111
112void
113nv30_fb_takedown(struct drm_device *dev)
114{
115}
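
calc_bias() in the deleted nv30_fb.c reads a 4-bit field and maps it through (b & 0x8 ? b - 0x10 : b), i.e. a two's-complement sign extension of a nibble to the range [-8, 7], which the driver then doubles. A hedged standalone sketch of just that step (the helper name is illustrative):

#include <stdint.h>

static int sign_extend_nibble(uint32_t b)
{
	b &= 0xf;
	return (b & 0x8) ? (int)b - 0x10 : (int)b;
}
/* e.g. 0xf -> -1 (bias 2 * -1 = -2), 0x7 -> +7 (bias +14) */
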
diff --git a/drivers/gpu/drm/nouveau/nv31_mpeg.c b/drivers/gpu/drm/nouveau/nv31_mpeg.c
deleted file mode 100644
index 818deb67588e..000000000000
--- a/drivers/gpu/drm/nouveau/nv31_mpeg.c
+++ /dev/null
@@ -1,346 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drv.h"
27#include "nouveau_fifo.h"
28#include "nouveau_ramht.h"
29
30struct nv31_mpeg_engine {
31 struct nouveau_exec_engine base;
32 atomic_t refcount;
33};
34
35
36static int
37nv31_mpeg_context_new(struct nouveau_channel *chan, int engine)
38{
39 struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
40
41 if (!atomic_add_unless(&pmpeg->refcount, 1, 1))
42 return -EBUSY;
43
44 chan->engctx[engine] = (void *)0xdeadcafe;
45 return 0;
46}
47
48static void
49nv31_mpeg_context_del(struct nouveau_channel *chan, int engine)
50{
51 struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
52 atomic_dec(&pmpeg->refcount);
53 chan->engctx[engine] = NULL;
54}
55
56static int
57nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
58{
59 struct drm_device *dev = chan->dev;
60 struct drm_nouveau_private *dev_priv = dev->dev_private;
61 struct nouveau_gpuobj *ctx = NULL;
62 unsigned long flags;
63 int ret;
64
65 NV_DEBUG(dev, "ch%d\n", chan->id);
66
67 ret = nouveau_gpuobj_new(dev, NULL, 264 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC |
68 NVOBJ_FLAG_ZERO_FREE, &ctx);
69 if (ret)
70 return ret;
71
72 nv_wo32(ctx, 0x78, 0x02001ec1);
73
74 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
75 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
76 if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
77 nv_wr32(dev, 0x00330c, ctx->pinst >> 4);
78 nv_wo32(chan->ramfc, 0x54, ctx->pinst >> 4);
79 nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
80 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
81
82 chan->engctx[engine] = ctx;
83 return 0;
84}
85
86static void
87nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
88{
89 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
90 struct nouveau_gpuobj *ctx = chan->engctx[engine];
91 struct drm_device *dev = chan->dev;
92 unsigned long flags;
93 u32 inst = 0x80000000 | (ctx->pinst >> 4);
94
95 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
96 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
97 if (nv_rd32(dev, 0x00b318) == inst)
98 nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
99 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
100 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
101
102 nouveau_gpuobj_ref(NULL, &ctx);
103 chan->engctx[engine] = NULL;
104}
105
106static int
107nv31_mpeg_object_new(struct nouveau_channel *chan, int engine,
108 u32 handle, u16 class)
109{
110 struct drm_device *dev = chan->dev;
111 struct nouveau_gpuobj *obj = NULL;
112 int ret;
113
114 ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_ALLOC |
115 NVOBJ_FLAG_ZERO_FREE, &obj);
116 if (ret)
117 return ret;
118 obj->engine = 2;
119 obj->class = class;
120
121 nv_wo32(obj, 0x00, class);
122
123 ret = nouveau_ramht_insert(chan, handle, obj);
124 nouveau_gpuobj_ref(NULL, &obj);
125 return ret;
126}
127
128static int
129nv31_mpeg_init(struct drm_device *dev, int engine)
130{
131 struct drm_nouveau_private *dev_priv = dev->dev_private;
132 struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
133 int i;
134
135 /* VPE init */
136 nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
137 nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
138 nv_wr32(dev, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
139 nv_wr32(dev, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
140
141 for (i = 0; i < dev_priv->engine.fb.num_tiles; i++)
142 pmpeg->base.set_tile_region(dev, i);
143
144 /* PMPEG init */
145 nv_wr32(dev, 0x00b32c, 0x00000000);
146 nv_wr32(dev, 0x00b314, 0x00000100);
147 nv_wr32(dev, 0x00b220, nv44_graph_class(dev) ? 0x00000044 : 0x00000031);
148 nv_wr32(dev, 0x00b300, 0x02001ec1);
149 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
150
151 nv_wr32(dev, 0x00b100, 0xffffffff);
152 nv_wr32(dev, 0x00b140, 0xffffffff);
153
154 if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
155 NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
156 return -EBUSY;
157 }
158
159 return 0;
160}
161
162static int
163nv31_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
164{
165 /*XXX: context save? */
166 nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
167 nv_wr32(dev, 0x00b140, 0x00000000);
168 return 0;
169}
170
171static int
172nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
173{
174 struct drm_device *dev = chan->dev;
175 u32 inst = data << 4;
176 u32 dma0 = nv_ri32(dev, inst + 0);
177 u32 dma1 = nv_ri32(dev, inst + 4);
178 u32 dma2 = nv_ri32(dev, inst + 8);
179 u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
180 u32 size = dma1 + 1;
181
182 /* only allow linear DMA objects */
183 if (!(dma0 & 0x00002000))
184 return -EINVAL;
185
186 if (mthd == 0x0190) {
187 /* DMA_CMD */
188 nv_mask(dev, 0x00b300, 0x00030000, (dma0 & 0x00030000));
189 nv_wr32(dev, 0x00b334, base);
190 nv_wr32(dev, 0x00b324, size);
191 } else
192 if (mthd == 0x01a0) {
193 /* DMA_DATA */
194 nv_mask(dev, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
195 nv_wr32(dev, 0x00b360, base);
196 nv_wr32(dev, 0x00b364, size);
197 } else {
198 /* DMA_IMAGE, VRAM only */
199 if (dma0 & 0x000c0000)
200 return -EINVAL;
201
202 nv_wr32(dev, 0x00b370, base);
203 nv_wr32(dev, 0x00b374, size);
204 }
205
206 return 0;
207}
208
209static int
210nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
211{
212 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
213 struct drm_nouveau_private *dev_priv = dev->dev_private;
214 struct nouveau_gpuobj *ctx;
215 unsigned long flags;
216 int i;
217
218 /* hardcode drm channel id on nv3x, so swmthd lookup works */
219 if (dev_priv->card_type < NV_40)
220 return 0;
221
222 spin_lock_irqsave(&dev_priv->channels.lock, flags);
223 for (i = 0; i < pfifo->channels; i++) {
224 if (!dev_priv->channels.ptr[i])
225 continue;
226
227 ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG];
228 if (ctx && ctx->pinst == inst)
229 break;
230 }
231 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
232 return i;
233}
234
235static void
236nv31_vpe_set_tile_region(struct drm_device *dev, int i)
237{
238 struct drm_nouveau_private *dev_priv = dev->dev_private;
239 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
240
241 nv_wr32(dev, 0x00b008 + (i * 0x10), tile->pitch);
242 nv_wr32(dev, 0x00b004 + (i * 0x10), tile->limit);
243 nv_wr32(dev, 0x00b000 + (i * 0x10), tile->addr);
244}
245
246static void
247nv31_mpeg_isr(struct drm_device *dev)
248{
249 u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4;
250 u32 chid = nv31_mpeg_isr_chid(dev, inst);
251 u32 stat = nv_rd32(dev, 0x00b100);
252 u32 type = nv_rd32(dev, 0x00b230);
253 u32 mthd = nv_rd32(dev, 0x00b234);
254 u32 data = nv_rd32(dev, 0x00b238);
255 u32 show = stat;
256
257 if (stat & 0x01000000) {
258 /* happens on initial binding of the object */
259 if (type == 0x00000020 && mthd == 0x0000) {
260 nv_mask(dev, 0x00b308, 0x00000000, 0x00000000);
261 show &= ~0x01000000;
262 }
263
264 if (type == 0x00000010) {
265 if (!nouveau_gpuobj_mthd_call2(dev, chid, 0x3174, mthd, data))
266 show &= ~0x01000000;
267 }
268 }
269
270 nv_wr32(dev, 0x00b100, stat);
271 nv_wr32(dev, 0x00b230, 0x00000001);
272
273 if (show && nouveau_ratelimit()) {
274 NV_INFO(dev, "PMPEG: Ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
275 chid, inst, stat, type, mthd, data);
276 }
277}
278
279static void
280nv31_vpe_isr(struct drm_device *dev)
281{
282 if (nv_rd32(dev, 0x00b100))
283 nv31_mpeg_isr(dev);
284
285 if (nv_rd32(dev, 0x00b800)) {
286 u32 stat = nv_rd32(dev, 0x00b800);
287 NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
288 nv_wr32(dev, 0xb800, stat);
289 }
290}
291
292static void
293nv31_mpeg_destroy(struct drm_device *dev, int engine)
294{
295 struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
296
297 nouveau_irq_unregister(dev, 0);
298
299 NVOBJ_ENGINE_DEL(dev, MPEG);
300 kfree(pmpeg);
301}
302
303int
304nv31_mpeg_create(struct drm_device *dev)
305{
306 struct drm_nouveau_private *dev_priv = dev->dev_private;
307 struct nv31_mpeg_engine *pmpeg;
308
309 pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
310 if (!pmpeg)
311 return -ENOMEM;
312 atomic_set(&pmpeg->refcount, 0);
313
314 pmpeg->base.destroy = nv31_mpeg_destroy;
315 pmpeg->base.init = nv31_mpeg_init;
316 pmpeg->base.fini = nv31_mpeg_fini;
317 if (dev_priv->card_type < NV_40) {
318 pmpeg->base.context_new = nv31_mpeg_context_new;
319 pmpeg->base.context_del = nv31_mpeg_context_del;
320 } else {
321 pmpeg->base.context_new = nv40_mpeg_context_new;
322 pmpeg->base.context_del = nv40_mpeg_context_del;
323 }
324 pmpeg->base.object_new = nv31_mpeg_object_new;
325
326 /* ISR vector, PMC_ENABLE bit, and TILE regs are shared between
327 * all VPE engines, for this driver's purposes the PMPEG engine
328 * will be treated as the "master" and handle the global VPE
329 * bits too
330 */
331 pmpeg->base.set_tile_region = nv31_vpe_set_tile_region;
332 nouveau_irq_register(dev, 0, nv31_vpe_isr);
333
334 NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
335 NVOBJ_CLASS(dev, 0x3174, MPEG);
336 NVOBJ_MTHD (dev, 0x3174, 0x0190, nv31_mpeg_mthd_dma);
337 NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv31_mpeg_mthd_dma);
338 NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv31_mpeg_mthd_dma);
339
340#if 0
341 NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
342 NVOBJ_CLASS(dev, 0x4075, ME);
343#endif
344 return 0;
345
346}
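
nv31_mpeg_context_new() above admits at most one channel: atomic_add_unless(&refcount, 1, 1) increments only while the counter is not already 1, so a second claimant sees -EBUSY. A rough userspace analogue of that single-owner gate, assuming GCC/Clang atomic builtins (a sketch, not the driver's code):

static int claim_single_owner(int *refcount)
{
	int expected = 0;
	/* succeed only on the 0 -> 1 transition */
	if (!__atomic_compare_exchange_n(refcount, &expected, 1, 0,
					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
		return -1; /* already owned */
	return 0;
}
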
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
deleted file mode 100644
index 88b4f7c43992..000000000000
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ /dev/null
@@ -1,162 +0,0 @@
1#include <drm/drmP.h>
2#include "nouveau_drv.h"
3#include <drm/nouveau_drm.h>
4
5void
6nv40_fb_set_tile_region(struct drm_device *dev, int i)
7{
8 struct drm_nouveau_private *dev_priv = dev->dev_private;
9 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
10
11 switch (dev_priv->chipset) {
12 case 0x40:
13 nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
14 nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
15 nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
16 break;
17
18 default:
19 nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
20 nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
21 nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
22 break;
23 }
24}
25
26static void
27nv40_fb_init_gart(struct drm_device *dev)
28{
29 struct drm_nouveau_private *dev_priv = dev->dev_private;
30 struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
31
32 if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
33 nv_wr32(dev, 0x100800, 0x00000001);
34 return;
35 }
36
37 nv_wr32(dev, 0x100800, gart->pinst | 0x00000002);
38 nv_mask(dev, 0x10008c, 0x00000100, 0x00000100);
39 nv_wr32(dev, 0x100820, 0x00000000);
40}
41
42static void
43nv44_fb_init_gart(struct drm_device *dev)
44{
45 struct drm_nouveau_private *dev_priv = dev->dev_private;
46 struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
47 u32 vinst;
48
49 if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
50 nv_wr32(dev, 0x100850, 0x80000000);
51 nv_wr32(dev, 0x100800, 0x00000001);
52 return;
53 }
54
55 /* calculate vram address of this PRAMIN block, object
56 * must be allocated on 512KiB alignment, and not exceed
57 * a total size of 512KiB for this to work correctly
58 */
59 vinst = nv_rd32(dev, 0x10020c);
60 vinst -= ((gart->pinst >> 19) + 1) << 19;
61
62 nv_wr32(dev, 0x100850, 0x80000000);
63 nv_wr32(dev, 0x100818, dev_priv->gart_info.dummy.addr);
64
65 nv_wr32(dev, 0x100804, dev_priv->gart_info.aper_size);
66 nv_wr32(dev, 0x100850, 0x00008000);
67 nv_mask(dev, 0x10008c, 0x00000200, 0x00000200);
68 nv_wr32(dev, 0x100820, 0x00000000);
69 nv_wr32(dev, 0x10082c, 0x00000001);
70 nv_wr32(dev, 0x100800, vinst | 0x00000010);
71}
72
73int
74nv40_fb_vram_init(struct drm_device *dev)
75{
76 struct drm_nouveau_private *dev_priv = dev->dev_private;
77
78 /* 0x001218 is actually present on a few other NV4X I looked at,
79 * and even contains sane values matching 0x100474. From looking
80 * at various vbios images however, this isn't the case everywhere.
81 * So, I chose to use the same regs I've seen NVIDIA reading around
82 * the memory detection, hopefully that'll get us the right numbers
83 */
84 if (dev_priv->chipset == 0x40) {
85 u32 pbus1218 = nv_rd32(dev, 0x001218);
86 switch (pbus1218 & 0x00000300) {
87 case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
88 case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
89 case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
90 case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
91 }
92 } else
93 if (dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
94 u32 pfb914 = nv_rd32(dev, 0x100914);
95 switch (pfb914 & 0x00000003) {
96 case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
97 case 0x00000001: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
98 case 0x00000002: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
99 case 0x00000003: break;
100 }
101 } else
102 if (dev_priv->chipset != 0x4e) {
103 u32 pfb474 = nv_rd32(dev, 0x100474);
104 if (pfb474 & 0x00000004)
105 dev_priv->vram_type = NV_MEM_TYPE_GDDR3;
106 if (pfb474 & 0x00000002)
107 dev_priv->vram_type = NV_MEM_TYPE_DDR2;
108 if (pfb474 & 0x00000001)
109 dev_priv->vram_type = NV_MEM_TYPE_DDR1;
110 } else {
111 dev_priv->vram_type = NV_MEM_TYPE_STOLEN;
112 }
113
114 dev_priv->vram_size = nv_rd32(dev, 0x10020c) & 0xff000000;
115 return 0;
116}
117
118int
119nv40_fb_init(struct drm_device *dev)
120{
121 struct drm_nouveau_private *dev_priv = dev->dev_private;
122 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
123 uint32_t tmp;
124 int i;
125
126 if (dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
127 if (nv44_graph_class(dev))
128 nv44_fb_init_gart(dev);
129 else
130 nv40_fb_init_gart(dev);
131 }
132
133 switch (dev_priv->chipset) {
134 case 0x40:
135 case 0x45:
136 tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
137 nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
138 pfb->num_tiles = NV10_PFB_TILE__SIZE;
139 break;
140 case 0x46: /* G72 */
141 case 0x47: /* G70 */
142 case 0x49: /* G71 */
143 case 0x4b: /* G73 */
144 case 0x4c: /* C51 (G7X version) */
145 pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
146 break;
147 default:
148 pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
149 break;
150 }
151
152 /* Turn all the tiling regions off. */
153 for (i = 0; i < pfb->num_tiles; i++)
154 pfb->set_tile_region(dev, i);
155
156 return 0;
157}
158
159void
160nv40_fb_takedown(struct drm_device *dev)
161{
162}
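
The alignment comment in nv44_fb_init_gart() above (512 KiB alignment, at most 512 KiB total) follows from how the code converts a PRAMIN offset to a VRAM address: PRAMIN sits at the top of VRAM, and vinst -= ((gart->pinst >> 19) + 1) << 19 subtracts the object's distance from the top rounded up to whole 512 KiB blocks. A hedged restatement of the arithmetic (function name illustrative, assuming that top-of-VRAM layout):

#include <stdint.h>

static uint32_t pramin_to_vram(uint32_t vram_size, uint32_t pinst)
{
	const uint32_t block = 1u << 19; /* 512 KiB */
	/* the object must fit inside one block counted from the top */
	return vram_size - ((pinst / block) + 1) * block;
}
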
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
deleted file mode 100644
index cf952d2048ed..000000000000
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ /dev/null
@@ -1,209 +0,0 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include "nouveau_drv.h"
29#include "nouveau_fifo.h"
30#include "nouveau_util.h"
31#include "nouveau_ramht.h"
32
33static struct ramfc_desc {
34 unsigned bits:6;
35 unsigned ctxs:5;
36 unsigned ctxp:8;
37 unsigned regs:5;
38 unsigned regp;
39} nv40_ramfc[] = {
40 { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
41 { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
42 { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
43 { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
44 { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
45 { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
46 { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
47 { 2, 28, 0x18, 28, 0x002058 },
48 { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
49 { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
50 { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
51 { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
52 { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
53 { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
54 { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
55 { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
56 { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
57 { 32, 0, 0x40, 0, 0x0032e4 },
58 { 32, 0, 0x44, 0, 0x0032e8 },
59 { 32, 0, 0x4c, 0, 0x002088 },
60 { 32, 0, 0x50, 0, 0x003300 },
61 { 32, 0, 0x54, 0, 0x00330c },
62 {}
63};
64
65struct nv40_fifo_priv {
66 struct nouveau_fifo_priv base;
67 struct ramfc_desc *ramfc_desc;
68};
69
70struct nv40_fifo_chan {
71 struct nouveau_fifo_chan base;
72 struct nouveau_gpuobj *ramfc;
73};
74
75static int
76nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
77{
78 struct drm_device *dev = chan->dev;
79 struct drm_nouveau_private *dev_priv = dev->dev_private;
80 struct nv40_fifo_priv *priv = nv_engine(dev, engine);
81 struct nv40_fifo_chan *fctx;
82 unsigned long flags;
83 int ret;
84
85 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
86 if (!fctx)
87 return -ENOMEM;
88
89 /* map channel control registers */
90 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
91 NV03_USER(chan->id), PAGE_SIZE);
92 if (!chan->user) {
93 ret = -ENOMEM;
94 goto error;
95 }
96
97 /* initialise default fifo context */
98 ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
99 chan->id * 128, ~0, 128,
100 NVOBJ_FLAG_ZERO_ALLOC |
101 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
102 if (ret)
103 goto error;
104
105 nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
106 nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
107 nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
108 nv_wo32(fctx->ramfc, 0x18, 0x30000000 |
109 NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
110 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
111#ifdef __BIG_ENDIAN
112 NV_PFIFO_CACHE1_BIG_ENDIAN |
113#endif
114 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
115 nv_wo32(fctx->ramfc, 0x3c, 0x0001ffff);
116
117 /* enable dma mode on the channel */
118 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
119 nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
120 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
121
122 /*XXX: remove this later, need fifo engine context commit hook */
123 nouveau_gpuobj_ref(fctx->ramfc, &chan->ramfc);
124
125error:
126 if (ret)
127 priv->base.base.context_del(chan, engine);
128 return ret;
129}
130
131static int
132nv40_fifo_init(struct drm_device *dev, int engine)
133{
134 struct drm_nouveau_private *dev_priv = dev->dev_private;
135 struct nv40_fifo_priv *priv = nv_engine(dev, engine);
136 int i;
137
138 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
139 nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
140
141 nv_wr32(dev, 0x002040, 0x000000ff);
142 nv_wr32(dev, 0x002044, 0x2101ffff);
143 nv_wr32(dev, 0x002058, 0x00000001);
144
145 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
146 ((dev_priv->ramht->bits - 9) << 16) |
147 (dev_priv->ramht->gpuobj->pinst >> 8));
148 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
149
150 switch (dev_priv->chipset) {
151 case 0x47:
152 case 0x49:
153 case 0x4b:
154 nv_wr32(dev, 0x002230, 0x00000001);
155 case 0x40:
156 case 0x41:
157 case 0x42:
158 case 0x43:
159 case 0x45:
160 case 0x48:
161 nv_wr32(dev, 0x002220, 0x00030002);
162 break;
163 default:
164 nv_wr32(dev, 0x002230, 0x00000000);
165 nv_wr32(dev, 0x002220, ((dev_priv->vram_size - 512 * 1024 +
166 dev_priv->ramfc->pinst) >> 16) |
167 0x00030000);
168 break;
169 }
170
171 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
172
173 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
174 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
175
176 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
177 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
178 nv_wr32(dev, NV03_PFIFO_CACHES, 1);
179
180 for (i = 0; i < priv->base.channels; i++) {
181 if (dev_priv->channels.ptr[i])
182 nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
183 }
184
185 return 0;
186}
187
188int
189nv40_fifo_create(struct drm_device *dev)
190{
191 struct drm_nouveau_private *dev_priv = dev->dev_private;
192 struct nv40_fifo_priv *priv;
193
194 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
195 if (!priv)
196 return -ENOMEM;
197
198 priv->base.base.destroy = nv04_fifo_destroy;
199 priv->base.base.init = nv40_fifo_init;
200 priv->base.base.fini = nv04_fifo_fini;
201 priv->base.base.context_new = nv40_fifo_context_new;
202 priv->base.base.context_del = nv04_fifo_context_del;
203 priv->base.channels = 31;
204 priv->ramfc_desc = nv40_ramfc;
205 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
206
207 nouveau_irq_register(dev, 8, nv04_fifo_isr);
208 return 0;
209}
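
The ramfc_desc[] table in the deleted nv40_fifo.c is a data-driven register/context map: each row says "a field this many bits wide lives at this shift and byte offset in the RAMFC image, and at this shift in this PFIFO register", with a zero-width row as terminator. A sketch of the restore loop such a table implies; the accessor callbacks are illustrative stand-ins for the real register and instmem helpers, and the exact masking the hardware path uses may differ:

#include <stdint.h>

struct ramfc_field {
	unsigned bits;  /* field width, 0 terminates the table */
	unsigned ctxs;  /* bit shift within the context word */
	unsigned ctxp;  /* byte offset within the context image */
	unsigned regs;  /* bit shift within the register */
	unsigned regp;  /* register address */
};

static void ramfc_load(const struct ramfc_field *c,
		       uint32_t (*ctx_rd)(unsigned off),
		       void (*reg_wr)(unsigned addr, uint32_t val))
{
	for (; c->bits; c++) {
		uint32_t mask = (c->bits == 32) ? 0xffffffffu
						: ((1u << c->bits) - 1);
		reg_wr(c->regp, ((ctx_rd(c->ctxp) >> c->ctxs) & mask)
				<< c->regs);
	}
}
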
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
deleted file mode 100644
index 5489201bec0b..000000000000
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ /dev/null
@@ -1,466 +0,0 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include "nouveau_drv.h"
29#include "nouveau_fifo.h"
30#include "nouveau_ramht.h"
31
32struct nv40_graph_engine {
33 struct nouveau_exec_engine base;
34 u32 grctx_size;
35};
36
37static int
38nv40_graph_context_new(struct nouveau_channel *chan, int engine)
39{
40 struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
41 struct drm_device *dev = chan->dev;
42 struct drm_nouveau_private *dev_priv = dev->dev_private;
43 struct nouveau_gpuobj *grctx = NULL;
44 unsigned long flags;
45 int ret;
46
47 ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
48 NVOBJ_FLAG_ZERO_ALLOC, &grctx);
49 if (ret)
50 return ret;
51
52 /* Initialise default context values */
53 nv40_grctx_fill(dev, grctx);
54 nv_wo32(grctx, 0, grctx->vinst);
55
56 /* init grctx pointer in ramfc, and on PFIFO if channel is
57 * already active there
58 */
59 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
60 nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
61 nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
62 if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
63 nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
64 nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
65 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
66
67 chan->engctx[engine] = grctx;
68 return 0;
69}
70
71static void
72nv40_graph_context_del(struct nouveau_channel *chan, int engine)
73{
74 struct nouveau_gpuobj *grctx = chan->engctx[engine];
75 struct drm_device *dev = chan->dev;
76 struct drm_nouveau_private *dev_priv = dev->dev_private;
77 u32 inst = 0x01000000 | (grctx->pinst >> 4);
78 unsigned long flags;
79
80 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
81 nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
82 if (nv_rd32(dev, 0x40032c) == inst)
83 nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
84 if (nv_rd32(dev, 0x400330) == inst)
85 nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
86 nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
87 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
88
89 /* Free the context resources */
90 nouveau_gpuobj_ref(NULL, &grctx);
91 chan->engctx[engine] = NULL;
92}
93
94int
95nv40_graph_object_new(struct nouveau_channel *chan, int engine,
96 u32 handle, u16 class)
97{
98 struct drm_device *dev = chan->dev;
99 struct nouveau_gpuobj *obj = NULL;
100 int ret;
101
102 ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
103 if (ret)
104 return ret;
105 obj->engine = 1;
106 obj->class = class;
107
108 nv_wo32(obj, 0x00, class);
109 nv_wo32(obj, 0x04, 0x00000000);
110#ifndef __BIG_ENDIAN
111 nv_wo32(obj, 0x08, 0x00000000);
112#else
113 nv_wo32(obj, 0x08, 0x01000000);
114#endif
115 nv_wo32(obj, 0x0c, 0x00000000);
116 nv_wo32(obj, 0x10, 0x00000000);
117
118 ret = nouveau_ramht_insert(chan, handle, obj);
119 nouveau_gpuobj_ref(NULL, &obj);
120 return ret;
121}
122
123static void
124nv40_graph_set_tile_region(struct drm_device *dev, int i)
125{
126 struct drm_nouveau_private *dev_priv = dev->dev_private;
127 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
128
129 switch (dev_priv->chipset) {
130 case 0x40:
131 case 0x41: /* guess */
132 case 0x42:
133 case 0x43:
134 case 0x45: /* guess */
135 case 0x4e:
136 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
137 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
138 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
139 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
140 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
141 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
142 break;
143 case 0x44:
144 case 0x4a:
145 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
146 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
147 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
148 break;
149 case 0x46:
150 case 0x47:
151 case 0x49:
152 case 0x4b:
153 case 0x4c:
154 case 0x67:
155 default:
156 nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
157 nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
158 nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
159 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
160 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
161 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
162 break;
163 }
164}
165
166/*
167 * G70 0x47
168 * G71 0x49
169 * NV45 0x48
170 * G72[M] 0x46
171 * G73 0x4b
172 * C51_G7X 0x4c
173 * C51 0x4e
174 */
175int
176nv40_graph_init(struct drm_device *dev, int engine)
177{
178 struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
179 struct drm_nouveau_private *dev_priv = dev->dev_private;
180 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
181 uint32_t vramsz;
182 int i, j;
183
184 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
185 ~NV_PMC_ENABLE_PGRAPH);
186 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
187 NV_PMC_ENABLE_PGRAPH);
188
189 /* generate and upload context program */
190 nv40_grctx_init(dev, &pgraph->grctx_size);
191
192 /* No context present currently */
193 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
194
195 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
196 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
197
198 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
199 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
200 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
201 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
202 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
203 nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
204
205 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
206 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
207
208 j = nv_rd32(dev, 0x1540) & 0xff;
209 if (j) {
210 for (i = 0; !(j & 1); j >>= 1, i++)
211 ;
212 nv_wr32(dev, 0x405000, i);
213 }
214
215 if (dev_priv->chipset == 0x40) {
216 nv_wr32(dev, 0x4009b0, 0x83280fff);
217 nv_wr32(dev, 0x4009b4, 0x000000a0);
218 } else {
219 nv_wr32(dev, 0x400820, 0x83280eff);
220 nv_wr32(dev, 0x400824, 0x000000a0);
221 }
222
223 switch (dev_priv->chipset) {
224 case 0x40:
225 case 0x45:
226 nv_wr32(dev, 0x4009b8, 0x0078e366);
227 nv_wr32(dev, 0x4009bc, 0x0000014c);
228 break;
229 case 0x41:
230 case 0x42: /* pciid also 0x00Cx */
231 /* case 0x0120: XXX (pciid) */
232 nv_wr32(dev, 0x400828, 0x007596ff);
233 nv_wr32(dev, 0x40082c, 0x00000108);
234 break;
235 case 0x43:
236 nv_wr32(dev, 0x400828, 0x0072cb77);
237 nv_wr32(dev, 0x40082c, 0x00000108);
238 break;
239 case 0x44:
240 case 0x46: /* G72 */
241 case 0x4a:
242 case 0x4c: /* G7x-based C51 */
243 case 0x4e:
244 nv_wr32(dev, 0x400860, 0);
245 nv_wr32(dev, 0x400864, 0);
246 break;
247 case 0x47: /* G70 */
248 case 0x49: /* G71 */
249 case 0x4b: /* G73 */
250 nv_wr32(dev, 0x400828, 0x07830610);
251 nv_wr32(dev, 0x40082c, 0x0000016A);
252 break;
253 default:
254 break;
255 }
256
257 nv_wr32(dev, 0x400b38, 0x2ffff800);
258 nv_wr32(dev, 0x400b3c, 0x00006000);
259
260 /* Tiling related stuff. */
261 switch (dev_priv->chipset) {
262 case 0x44:
263 case 0x4a:
264 nv_wr32(dev, 0x400bc4, 0x1003d888);
265 nv_wr32(dev, 0x400bbc, 0xb7a7b500);
266 break;
267 case 0x46:
268 nv_wr32(dev, 0x400bc4, 0x0000e024);
269 nv_wr32(dev, 0x400bbc, 0xb7a7b520);
270 break;
271 case 0x4c:
272 case 0x4e:
273 case 0x67:
274 nv_wr32(dev, 0x400bc4, 0x1003d888);
275 nv_wr32(dev, 0x400bbc, 0xb7a7b540);
276 break;
277 default:
278 break;
279 }
280
281 /* Turn all the tiling regions off. */
282 for (i = 0; i < pfb->num_tiles; i++)
283 nv40_graph_set_tile_region(dev, i);
284
285 /* begin RAM config */
286 vramsz = pci_resource_len(dev->pdev, 0) - 1;
287 switch (dev_priv->chipset) {
288 case 0x40:
289 nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
290 nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
291 nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
292 nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
293 nv_wr32(dev, 0x400820, 0);
294 nv_wr32(dev, 0x400824, 0);
295 nv_wr32(dev, 0x400864, vramsz);
296 nv_wr32(dev, 0x400868, vramsz);
297 break;
298 default:
299 switch (dev_priv->chipset) {
300 case 0x41:
301 case 0x42:
302 case 0x43:
303 case 0x45:
304 case 0x4e:
305 case 0x44:
306 case 0x4a:
307 nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
308 nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
309 break;
310 default:
311 nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
312 nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
313 break;
314 }
315 nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
316 nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
317 nv_wr32(dev, 0x400840, 0);
318 nv_wr32(dev, 0x400844, 0);
319 nv_wr32(dev, 0x4008A0, vramsz);
320 nv_wr32(dev, 0x4008A4, vramsz);
321 break;
322 }
323
324 return 0;
325}
326
327static int
328nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
329{
330 u32 inst = nv_rd32(dev, 0x40032c);
331 if (inst & 0x01000000) {
332 nv_wr32(dev, 0x400720, 0x00000000);
333 nv_wr32(dev, 0x400784, inst);
334 nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
335 nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
336 if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
337 u32 insn = nv_rd32(dev, 0x400308);
338 NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
339 }
340 nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
341 }
342 return 0;
343}
344
345static int
346nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
347{
348 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
349 struct drm_nouveau_private *dev_priv = dev->dev_private;
350 struct nouveau_gpuobj *grctx;
351 unsigned long flags;
352 int i;
353
354 spin_lock_irqsave(&dev_priv->channels.lock, flags);
355 for (i = 0; i < pfifo->channels; i++) {
356 if (!dev_priv->channels.ptr[i])
357 continue;
358 grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
359
360 if (grctx && grctx->pinst == inst)
361 break;
362 }
363 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
364 return i;
365}
366
367static void
368nv40_graph_isr(struct drm_device *dev)
369{
370 u32 stat;
371
372 while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
373 u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
374 u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
375 u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
376 u32 chid = nv40_graph_isr_chid(dev, inst);
377 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
378 u32 subc = (addr & 0x00070000) >> 16;
379 u32 mthd = (addr & 0x00001ffc);
380 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
381 u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
382 u32 show = stat;
383
384 if (stat & NV_PGRAPH_INTR_ERROR) {
385 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
386 if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
387 show &= ~NV_PGRAPH_INTR_ERROR;
388 } else
389 if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
390 nv_mask(dev, 0x402000, 0, 0);
391 }
392 }
393
394 nv_wr32(dev, NV03_PGRAPH_INTR, stat);
395 nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
396
397 if (show && nouveau_ratelimit()) {
398 NV_INFO(dev, "PGRAPH -");
399 nouveau_bitfield_print(nv10_graph_intr, show);
400 printk(" nsource:");
401 nouveau_bitfield_print(nv04_graph_nsource, nsource);
402 printk(" nstatus:");
403 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
404 printk("\n");
405 NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
406 "class 0x%04x mthd 0x%04x data 0x%08x\n",
407 chid, inst, subc, class, mthd, data);
408 }
409 }
410}
411
412static void
413nv40_graph_destroy(struct drm_device *dev, int engine)
414{
415 struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
416
417 nouveau_irq_unregister(dev, 12);
418
419 NVOBJ_ENGINE_DEL(dev, GR);
420 kfree(pgraph);
421}
422
423int
424nv40_graph_create(struct drm_device *dev)
425{
426 struct nv40_graph_engine *pgraph;
427
428 pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
429 if (!pgraph)
430 return -ENOMEM;
431
432 pgraph->base.destroy = nv40_graph_destroy;
433 pgraph->base.init = nv40_graph_init;
434 pgraph->base.fini = nv40_graph_fini;
435 pgraph->base.context_new = nv40_graph_context_new;
436 pgraph->base.context_del = nv40_graph_context_del;
437 pgraph->base.object_new = nv40_graph_object_new;
438 pgraph->base.set_tile_region = nv40_graph_set_tile_region;
439
440 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
441 nouveau_irq_register(dev, 12, nv40_graph_isr);
442
443 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
444 NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
445 NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
446 NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
447 NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
448 NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
449 NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
450 NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
451 NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
452 NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
453 NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
454 NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
455 NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
456 NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
457 NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
458
459 /* curie */
460 if (nv44_graph_class(dev))
461 NVOBJ_CLASS(dev, 0x4497, GR);
462 else
463 NVOBJ_CLASS(dev, 0x4097, GR);
464
465 return 0;
466}
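
In nv40_graph_init() above, the loop for (i = 0; !(j & 1); j >>= 1, i++); over the mask read from 0x1540 is an open-coded find-first-set: it counts trailing zero bits to pick the index of the first enabled unit and programs it into 0x405000. An equivalent using the kernel's standard helper (a sketch, not the committed code):

#include <linux/bitops.h>
#include <linux/types.h>

static int first_enabled_unit(u32 mask)
{
	return mask ? (int)__ffs(mask) : -1; /* __ffs(0) is undefined */
}
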
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
deleted file mode 100644
index 788584364853..000000000000
--- a/drivers/gpu/drm/nouveau/nv40_mc.c
+++ /dev/null
@@ -1,27 +0,0 @@
1#include <drm/drmP.h>
2#include "nouveau_drv.h"
3#include <drm/nouveau_drm.h>
4
5int
6nv40_mc_init(struct drm_device *dev)
7{
8 /* Power up everything, resetting each individual unit will
9 * be done later if needed.
10 */
11 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
12
13 if (nv44_graph_class(dev)) {
14 u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
15 nv_wr32(dev, NV40_PMC_1700, tmp);
16 nv_wr32(dev, NV40_PMC_1704, 0);
17 nv_wr32(dev, NV40_PMC_1708, 0);
18 nv_wr32(dev, NV40_PMC_170C, tmp);
19 }
20
21 return 0;
22}
23
24void
25nv40_mc_takedown(struct drm_device *dev)
26{
27}
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index b94dd87d592c..3382064c7f33 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -23,18 +23,24 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_bios.h"
 #include "nouveau_pm.h"
 #include "nouveau_hw.h"
-#include "nouveau_fifo.h"
+
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
 
 #define min2(a,b) ((a) < (b) ? (a) : (b))
 
 static u32
 read_pll_1(struct drm_device *dev, u32 reg)
 {
-	u32 ctrl = nv_rd32(dev, reg + 0x00);
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ctrl = nv_rd32(device, reg + 0x00);
 	int P = (ctrl & 0x00070000) >> 16;
 	int N = (ctrl & 0x0000ff00) >> 8;
 	int M = (ctrl & 0x000000ff) >> 0;
@@ -49,8 +55,9 @@ read_pll_1(struct drm_device *dev, u32 reg)
 static u32
 read_pll_2(struct drm_device *dev, u32 reg)
 {
-	u32 ctrl = nv_rd32(dev, reg + 0x00);
-	u32 coef = nv_rd32(dev, reg + 0x04);
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ctrl = nv_rd32(device, reg + 0x00);
+	u32 coef = nv_rd32(device, reg + 0x04);
 	int N2 = (coef & 0xff000000) >> 24;
 	int M2 = (coef & 0x00ff0000) >> 16;
 	int N1 = (coef & 0x0000ff00) >> 8;
@@ -89,7 +96,8 @@ read_clk(struct drm_device *dev, u32 src)
 int
 nv40_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-	u32 ctrl = nv_rd32(dev, 0x00c040);
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ctrl = nv_rd32(device, 0x00c040);
 
 	perflvl->core = read_clk(dev, (ctrl & 0x00000003) >> 0);
 	perflvl->shader = read_clk(dev, (ctrl & 0x00000030) >> 4);
@@ -107,27 +115,30 @@ struct nv40_pm_state {
 };
 
 static int
-nv40_calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
+nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
 	      u32 clk, int *N1, int *M1, int *N2, int *M2, int *log2P)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nouveau_clock *pclk = nouveau_clock(device);
 	struct nouveau_pll_vals coef;
 	int ret;
 
-	ret = get_pll_limits(dev, reg, pll);
+	ret = nvbios_pll_parse(bios, reg, pll);
 	if (ret)
 		return ret;
 
-	if (clk < pll->vco1.maxfreq)
-		pll->vco2.maxfreq = 0;
+	if (clk < pll->vco1.max_freq)
+		pll->vco2.max_freq = 0;
 
-	ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
+	pclk->pll_calc(pclk, pll, clk, &coef);
 	if (ret == 0)
 		return -ERANGE;
 
 	*N1 = coef.N1;
 	*M1 = coef.M1;
 	if (N2 && M2) {
-		if (pll->vco2.maxfreq) {
+		if (pll->vco2.max_freq) {
 			*N2 = coef.N2;
 			*M2 = coef.M2;
 		} else {
@@ -143,7 +154,7 @@ void *
 nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
 	struct nv40_pm_state *info;
-	struct pll_lims pll;
+	struct nvbios_pll pll;
 	int N1, N2, M1, M2, log2P;
 	int ret;
 
@@ -191,7 +202,7 @@ nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 		goto out;
 
 	info->mpll_ctrl = 0x80000000 | (log2P << 16);
-	info->mpll_ctrl |= min2(pll.log2p_bias + log2P, pll.max_log2p) << 20;
+	info->mpll_ctrl |= min2(pll.bias_p + log2P, pll.max_p) << 20;
 	if (N2 == M2) {
 		info->mpll_ctrl |= 0x00000100;
 		info->mpll_coef = (N1 << 8) | M1;
@@ -212,12 +223,13 @@ static bool
 nv40_pm_gr_idle(void *data)
 {
 	struct drm_device *dev = data;
+	struct nouveau_device *device = nouveau_dev(dev);
 
-	if ((nv_rd32(dev, 0x400760) & 0x000000f0) >> 4 !=
-	    (nv_rd32(dev, 0x400760) & 0x0000000f))
+	if ((nv_rd32(device, 0x400760) & 0x000000f0) >> 4 !=
+	    (nv_rd32(device, 0x400760) & 0x0000000f))
		return false;
 
-	if (nv_rd32(dev, 0x400700))
+	if (nv_rd32(device, 0x400700))
 		return false;
 
 	return true;
@@ -226,7 +238,9 @@ nv40_pm_gr_idle(void *data)
 int
 nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_fifo *pfifo = nouveau_fifo(device);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv40_pm_state *info = pre_state;
 	unsigned long flags;
 	struct bit_entry M;
@@ -236,12 +250,12 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
 
 	/* determine which CRTCs are active, fetch VGA_SR1 for each */
 	for (i = 0; i < 2; i++) {
-		u32 vbl = nv_rd32(dev, 0x600808 + (i * 0x2000));
+		u32 vbl = nv_rd32(device, 0x600808 + (i * 0x2000));
 		u32 cnt = 0;
 		do {
-			if (vbl != nv_rd32(dev, 0x600808 + (i * 0x2000))) {
-				nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
-				sr1[i] = nv_rd08(dev, 0x0c03c5 + (i * 0x2000));
+			if (vbl != nv_rd32(device, 0x600808 + (i * 0x2000))) {
+				nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+				sr1[i] = nv_rd08(device, 0x0c03c5 + (i * 0x2000));
 				if (!(sr1[i] & 0x20))
 					crtc_mask |= (1 << i);
 				break;
@@ -251,28 +265,20 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
 	}
 
 	/* halt and idle engines */
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
-	if (!nv_wait(dev, 0x002500, 0x00000010, 0x00000000))
-		goto resume;
-	nv_mask(dev, 0x003220, 0x00000001, 0x00000000);
-	if (!nv_wait(dev, 0x003220, 0x00000010, 0x00000000))
-		goto resume;
-	nv_mask(dev, 0x003200, 0x00000001, 0x00000000);
-	nv04_fifo_cache_pull(dev, false);
+	pfifo->pause(pfifo, &flags);
 
-	if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
+	if (!nv_wait_cb(device, nv40_pm_gr_idle, dev))
 		goto resume;
 
 	ret = 0;
 
 	/* set engine clocks */
-	nv_mask(dev, 0x00c040, 0x00000333, 0x00000000);
-	nv_wr32(dev, 0x004004, info->npll_coef);
-	nv_mask(dev, 0x004000, 0xc0070100, info->npll_ctrl);
-	nv_mask(dev, 0x004008, 0xc007ffff, info->spll);
+	nv_mask(device, 0x00c040, 0x00000333, 0x00000000);
+	nv_wr32(device, 0x004004, info->npll_coef);
+	nv_mask(device, 0x004000, 0xc0070100, info->npll_ctrl);
+	nv_mask(device, 0x004008, 0xc007ffff, info->spll);
 	mdelay(5);
-	nv_mask(dev, 0x00c040, 0x00000333, info->ctrl);
+	nv_mask(device, 0x00c040, 0x00000333, info->ctrl);
 
 	if (!info->mpll_ctrl)
 		goto resume;
@@ -281,52 +287,52 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
 	for (i = 0; i < 2; i++) {
 		if (!(crtc_mask & (1 << i)))
 			continue;
-		nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
-		nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
-		nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
-		nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
+		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00000000);
+		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
+		nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+		nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i] | 0x20);
 	}
 
 	/* prepare ram for reclocking */
-	nv_wr32(dev, 0x1002d4, 0x00000001); /* precharge */
-	nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */
-	nv_wr32(dev, 0x1002d0, 0x00000001); /* refresh */
-	nv_mask(dev, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
-	nv_wr32(dev, 0x1002dc, 0x00000001); /* enable self-refresh */
+	nv_wr32(device, 0x1002d4, 0x00000001); /* precharge */
+	nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
+	nv_wr32(device, 0x1002d0, 0x00000001); /* refresh */
+	nv_mask(device, 0x100210, 0x80000000, 0x00000000); /* no auto refresh */
+	nv_wr32(device, 0x1002dc, 0x00000001); /* enable self-refresh */
 
 	/* change the PLL of each memory partition */
-	nv_mask(dev, 0x00c040, 0x0000c000, 0x00000000);
-	switch (dev_priv->chipset) {
+	nv_mask(device, 0x00c040, 0x0000c000, 0x00000000);
+	switch (nv_device(drm->device)->chipset) {
 	case 0x40:
 	case 0x45:
 	case 0x41:
 	case 0x42:
 	case 0x47:
-		nv_mask(dev, 0x004044, 0xc0771100, info->mpll_ctrl);
-		nv_mask(dev, 0x00402c, 0xc0771100, info->mpll_ctrl);
-		nv_wr32(dev, 0x004048, info->mpll_coef);
-		nv_wr32(dev, 0x004030, info->mpll_coef);
+		nv_mask(device, 0x004044, 0xc0771100, info->mpll_ctrl);
+		nv_mask(device, 0x00402c, 0xc0771100, info->mpll_ctrl);
+		nv_wr32(device, 0x004048, info->mpll_coef);
+		nv_wr32(device, 0x004030, info->mpll_coef);
 	case 0x43:
 	case 0x49:
 	case 0x4b:
-		nv_mask(dev, 0x004038, 0xc0771100, info->mpll_ctrl);
-		nv_wr32(dev, 0x00403c, info->mpll_coef);
+		nv_mask(device, 0x004038, 0xc0771100, info->mpll_ctrl);
+		nv_wr32(device, 0x00403c, info->mpll_coef);
 	default:
-		nv_mask(dev, 0x004020, 0xc0771100, info->mpll_ctrl);
-		nv_wr32(dev, 0x004024, info->mpll_coef);
+		nv_mask(device, 0x004020, 0xc0771100, info->mpll_ctrl);
+		nv_wr32(device, 0x004024, info->mpll_coef);
 		break;
 	}
 	udelay(100);
-	nv_mask(dev, 0x00c040, 0x0000c000, 0x0000c000);
+	nv_mask(device, 0x00c040, 0x0000c000, 0x0000c000);
 
 	/* re-enable normal operation of memory controller */
-	nv_wr32(dev, 0x1002dc, 0x00000000);
-	nv_mask(dev, 0x100210, 0x80000000, 0x80000000);
+	nv_wr32(device, 0x1002dc, 0x00000000);
+	nv_mask(device, 0x100210, 0x80000000, 0x80000000);
 	udelay(100);
 
 	/* execute memory reset script from vbios */
 	if (!bit_table(dev, 'M', &M))
-		nouveau_bios_init_exec(dev, ROM16(M.data[0]));
+		nouveau_bios_run_init_table(dev, ROM16(M.data[0]), NULL, 0);
 
 	/* make sure we're in vblank (hopefully the same one as before), and
 	 * then re-enable crtc memory access
@@ -334,62 +340,14 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
 	for (i = 0; i < 2; i++) {
 		if (!(crtc_mask & (1 << i)))
 			continue;
-		nv_wait(dev, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
-		nv_wr08(dev, 0x0c03c4 + (i * 0x2000), 0x01);
-		nv_wr08(dev, 0x0c03c5 + (i * 0x2000), sr1[i]);
+		nv_wait(device, 0x600808 + (i * 0x2000), 0x00010000, 0x00010000);
+		nv_wr08(device, 0x0c03c4 + (i * 0x2000), 0x01);
+		nv_wr08(device, 0x0c03c5 + (i * 0x2000), sr1[i]);
 	}
 
 	/* resume engines */
 resume:
-	nv_wr32(dev, 0x003250, 0x00000001);
-	nv_mask(dev, 0x003220, 0x00000001, 0x00000001);
-	nv_wr32(dev, 0x003200, 0x00000001);
-	nv_wr32(dev, 0x002500, 0x00000001);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
+	pfifo->start(pfifo, &flags);
 	kfree(info);
 	return ret;
 }
-
-int
-nv40_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
-{
-	if (line == 2) {
-		u32 reg = nv_rd32(dev, 0x0010f0);
-		if (reg & 0x80000000) {
-			*duty = (reg & 0x7fff0000) >> 16;
-			*divs = (reg & 0x00007fff);
-			return 0;
-		}
-	} else
-	if (line == 9) {
-		u32 reg = nv_rd32(dev, 0x0015f4);
-		if (reg & 0x80000000) {
-			*divs = nv_rd32(dev, 0x0015f8);
-			*duty = (reg & 0x7fffffff);
-			return 0;
-		}
-	} else {
-		NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
-		return -ENODEV;
-	}
-
-	return -EINVAL;
-}
-
-int
-nv40_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
-{
-	if (line == 2) {
-		nv_wr32(dev, 0x0010f0, 0x80000000 | (duty << 16) | divs);
-	} else
-	if (line == 9) {
-		nv_wr32(dev, 0x0015f8, divs);
-		nv_wr32(dev, 0x0015f4, duty | 0x80000000);
-	} else {
-		NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
-		return -ENODEV;
-	}
-
-	return 0;
-}
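
Before blanking the CRTCs for reclocking, nv40_pm_clocks_set() waits for bit 16 of 0x600808 to read 0 and then 1. The clear-then-set order matters: it guarantees the SR1 write lands at the start of a fresh vblank period rather than in the tail of one already in progress. The polling shape, as a hedged standalone sketch (the callback and name are illustrative):

#include <stdint.h>

static int wait_vblank_start(uint32_t (*rd_status)(void), int tries)
{
	while (tries > 0 && (rd_status() & 0x00010000))
		tries--;   /* drain the current vblank, if any */
	while (tries > 0 && !(rd_status() & 0x00010000))
		tries--;   /* then catch the start of the next one */
	return tries > 0 ? 0 : -1; /* -1: timed out */
}
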
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 7f3ae75032d6..222de77d6269 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -27,24 +27,27 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
 #include "nouveau_reg.h"
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_gem.h"
 #include "nouveau_hw.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
-#include "nouveau_fb.h"
 #include "nouveau_connector.h"
 #include "nv50_display.h"
 
+#include <subdev/clock.h>
+
 static void
 nv50_crtc_lut_load(struct drm_crtc *crtc)
 {
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
 	int i;
 
-	NV_DEBUG_KMS(crtc->dev, "\n");
+	NV_DEBUG(drm, "\n");
 
 	for (i = 0; i < 256; i++) {
 		writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
@@ -63,25 +66,25 @@ int
 nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 {
 	struct drm_device *dev = nv_crtc->base.dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_channel *evo = nv50_display(dev)->master;
 	int index = nv_crtc->index, ret;
 
-	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
-	NV_DEBUG_KMS(dev, "%s\n", blanked ? "blanked" : "unblanked");
+	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
+	NV_DEBUG(drm, "%s\n", blanked ? "blanked" : "unblanked");
 
 	if (blanked) {
 		nv_crtc->cursor.hide(nv_crtc, false);
 
-		ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 7 : 5);
+		ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 7 : 5);
 		if (ret) {
-			NV_ERROR(dev, "no space while blanking crtc\n");
+			NV_ERROR(drm, "no space while blanking crtc\n");
 			return ret;
 		}
 		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
 		OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
 		OUT_RING(evo, 0);
-		if (dev_priv->chipset != 0x50) {
+		if (nv_device(drm->device)->chipset != 0x50) {
 			BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
 			OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
 		}
@@ -94,9 +97,9 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 		else
 			nv_crtc->cursor.hide(nv_crtc, false);
 
-		ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 10 : 8);
+		ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 10 : 8);
 		if (ret) {
-			NV_ERROR(dev, "no space while unblanking crtc\n");
+			NV_ERROR(drm, "no space while unblanking crtc\n");
 			return ret;
 		}
 		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
@@ -104,7 +107,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 			       NV50_EVO_CRTC_CLUT_MODE_OFF :
 			       NV50_EVO_CRTC_CLUT_MODE_ON);
 		OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
-		if (dev_priv->chipset != 0x50) {
+		if (nv_device(drm->device)->chipset != 0x50) {
 			BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
 			OUT_RING(evo, NvEvoVRAM);
 		}
@@ -113,7 +116,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
 		OUT_RING(evo, nv_crtc->fb.offset >> 8);
 		OUT_RING(evo, 0);
 		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
-		if (dev_priv->chipset != 0x50)
+		if (nv_device(drm->device)->chipset != 0x50)
 			if (nv_crtc->fb.tile_flags == 0x7a00 ||
 			    nv_crtc->fb.tile_flags == 0xfe00)
 				OUT_RING(evo, NvEvoFB32);
@@ -173,17 +176,18 @@ static int
 nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
174{ 177{
175 struct drm_device *dev = nv_crtc->base.dev; 178 struct drm_device *dev = nv_crtc->base.dev;
179 struct nouveau_drm *drm = nouveau_drm(dev);
176 struct nouveau_channel *evo = nv50_display(dev)->master; 180 struct nouveau_channel *evo = nv50_display(dev)->master;
177 int ret; 181 int ret;
178 int adj; 182 int adj;
179 u32 hue, vib; 183 u32 hue, vib;
180 184
181 NV_DEBUG_KMS(dev, "vibrance = %i, hue = %i\n", 185 NV_DEBUG(drm, "vibrance = %i, hue = %i\n",
182 nv_crtc->color_vibrance, nv_crtc->vibrant_hue); 186 nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
183 187
184 ret = RING_SPACE(evo, 2 + (update ? 2 : 0)); 188 ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
185 if (ret) { 189 if (ret) {
186 NV_ERROR(dev, "no space while setting color vibrance\n"); 190 NV_ERROR(drm, "no space while setting color vibrance\n");
187 return ret; 191 return ret;
188 } 192 }
189 193
@@ -228,17 +232,18 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
228 struct nouveau_connector *nv_connector; 232 struct nouveau_connector *nv_connector;
229 struct drm_crtc *crtc = &nv_crtc->base; 233 struct drm_crtc *crtc = &nv_crtc->base;
230 struct drm_device *dev = crtc->dev; 234 struct drm_device *dev = crtc->dev;
235 struct nouveau_drm *drm = nouveau_drm(dev);
231 struct nouveau_channel *evo = nv50_display(dev)->master; 236 struct nouveau_channel *evo = nv50_display(dev)->master;
232 struct drm_display_mode *umode = &crtc->mode; 237 struct drm_display_mode *umode = &crtc->mode;
233 struct drm_display_mode *omode; 238 struct drm_display_mode *omode;
234 int scaling_mode, ret; 239 int scaling_mode, ret;
235 u32 ctrl = 0, oX, oY; 240 u32 ctrl = 0, oX, oY;
236 241
237 NV_DEBUG_KMS(dev, "\n"); 242 NV_DEBUG(drm, "\n");
238 243
239 nv_connector = nouveau_crtc_connector_get(nv_crtc); 244 nv_connector = nouveau_crtc_connector_get(nv_crtc);
240 if (!nv_connector || !nv_connector->native_mode) { 245 if (!nv_connector || !nv_connector->native_mode) {
241 NV_ERROR(dev, "no native mode, forcing panel scaling\n"); 246 NV_ERROR(drm, "no native mode, forcing panel scaling\n");
242 scaling_mode = DRM_MODE_SCALE_NONE; 247 scaling_mode = DRM_MODE_SCALE_NONE;
243 } else { 248 } else {
244 scaling_mode = nv_connector->scaling_mode; 249 scaling_mode = nv_connector->scaling_mode;
@@ -328,63 +333,19 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
328int 333int
329nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk) 334nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
330{ 335{
331 struct drm_nouveau_private *dev_priv = dev->dev_private; 336 struct nouveau_device *device = nouveau_dev(dev);
332 struct pll_lims pll; 337 struct nouveau_clock *clk = nouveau_clock(device);
333 uint32_t reg1, reg2;
334 int ret, N1, M1, N2, M2, P;
335
336 ret = get_pll_limits(dev, PLL_VPLL0 + head, &pll);
337 if (ret)
338 return ret;
339
340 if (pll.vco2.maxfreq) {
341 ret = nv50_calc_pll(dev, &pll, pclk, &N1, &M1, &N2, &M2, &P);
342 if (ret <= 0)
343 return 0;
344
345 NV_DEBUG(dev, "pclk %d out %d NM1 %d %d NM2 %d %d P %d\n",
346 pclk, ret, N1, M1, N2, M2, P);
347
348 reg1 = nv_rd32(dev, pll.reg + 4) & 0xff00ff00;
349 reg2 = nv_rd32(dev, pll.reg + 8) & 0x8000ff00;
350 nv_wr32(dev, pll.reg + 0, 0x10000611);
351 nv_wr32(dev, pll.reg + 4, reg1 | (M1 << 16) | N1);
352 nv_wr32(dev, pll.reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
353 } else
354 if (dev_priv->chipset < NV_C0) {
355 ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P);
356 if (ret <= 0)
357 return 0;
358
359 NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
360 pclk, ret, N1, N2, M1, P);
361
362 reg1 = nv_rd32(dev, pll.reg + 4) & 0xffc00000;
363 nv_wr32(dev, pll.reg + 0, 0x50000610);
364 nv_wr32(dev, pll.reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
365 nv_wr32(dev, pll.reg + 8, N2);
366 } else {
367 ret = nva3_calc_pll(dev, &pll, pclk, &N1, &N2, &M1, &P);
368 if (ret <= 0)
369 return 0;
370
371 NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
372 pclk, ret, N1, N2, M1, P);
373 338
374 nv_mask(dev, pll.reg + 0x0c, 0x00000000, 0x00000100); 339 return clk->pll_set(clk, PLL_VPLL0 + head, pclk);
375 nv_wr32(dev, pll.reg + 0x04, (P << 16) | (N1 << 8) | M1);
376 nv_wr32(dev, pll.reg + 0x10, N2 << 16);
377 }
378
379 return 0;
380} 340}
381 341
382static void 342static void
383nv50_crtc_destroy(struct drm_crtc *crtc) 343nv50_crtc_destroy(struct drm_crtc *crtc)
384{ 344{
385 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 345 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
346 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
386 347
387 NV_DEBUG_KMS(crtc->dev, "\n"); 348 NV_DEBUG(drm, "\n");
388 349
389 nouveau_bo_unmap(nv_crtc->lut.nvbo); 350 nouveau_bo_unmap(nv_crtc->lut.nvbo);
390 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); 351 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
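
The hunk above collapses the entire inline VPLL computation — get_pll_limits() plus three chipset-specific branches (two-stage VCO, pre-NV_C0, NVC0+) each doing its own N/M/P calculation and raw register writes — into one call into the new clock subdev. A consolidated view of the replacement, with comments added for orientation (the body is exactly what the diff introduces):

	int
	nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
	{
		struct nouveau_device *device = nouveau_dev(dev);
		struct nouveau_clock *clk = nouveau_clock(device);

		/* the clock subdev now owns limits lookup, coefficient
		 * calculation and the chipset-specific register layout */
		return clk->pll_set(clk, PLL_VPLL0 + head, pclk);
	}
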
@@ -473,13 +434,15 @@ nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
473static void 434static void
474nv50_crtc_save(struct drm_crtc *crtc) 435nv50_crtc_save(struct drm_crtc *crtc)
475{ 436{
476 NV_ERROR(crtc->dev, "!!\n"); 437 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
438 NV_ERROR(drm, "!!\n");
477} 439}
478 440
479static void 441static void
480nv50_crtc_restore(struct drm_crtc *crtc) 442nv50_crtc_restore(struct drm_crtc *crtc)
481{ 443{
482 NV_ERROR(crtc->dev, "!!\n"); 444 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
445 NV_ERROR(drm, "!!\n");
483} 446}
484 447
485static const struct drm_crtc_funcs nv50_crtc_funcs = { 448static const struct drm_crtc_funcs nv50_crtc_funcs = {
@@ -503,8 +466,9 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
503{ 466{
504 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 467 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
505 struct drm_device *dev = crtc->dev; 468 struct drm_device *dev = crtc->dev;
469 struct nouveau_drm *drm = nouveau_drm(dev);
506 470
507 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 471 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
508 472
509 nv50_display_flip_stop(crtc); 473 nv50_display_flip_stop(crtc);
510 drm_vblank_pre_modeset(dev, nv_crtc->index); 474 drm_vblank_pre_modeset(dev, nv_crtc->index);
@@ -515,9 +479,10 @@ static void
515nv50_crtc_commit(struct drm_crtc *crtc) 479nv50_crtc_commit(struct drm_crtc *crtc)
516{ 480{
517 struct drm_device *dev = crtc->dev; 481 struct drm_device *dev = crtc->dev;
482 struct nouveau_drm *drm = nouveau_drm(dev);
518 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 483 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
519 484
520 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 485 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
521 486
522 nv50_crtc_blank(nv_crtc, false); 487 nv50_crtc_blank(nv_crtc, false);
523 drm_vblank_post_modeset(dev, nv_crtc->index); 488 drm_vblank_post_modeset(dev, nv_crtc->index);
@@ -539,17 +504,17 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
539{ 504{
540 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 505 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
541 struct drm_device *dev = nv_crtc->base.dev; 506 struct drm_device *dev = nv_crtc->base.dev;
542 struct drm_nouveau_private *dev_priv = dev->dev_private; 507 struct nouveau_drm *drm = nouveau_drm(dev);
543 struct nouveau_channel *evo = nv50_display(dev)->master; 508 struct nouveau_channel *evo = nv50_display(dev)->master;
544 struct drm_framebuffer *drm_fb; 509 struct drm_framebuffer *drm_fb;
545 struct nouveau_framebuffer *fb; 510 struct nouveau_framebuffer *fb;
546 int ret; 511 int ret;
547 512
548 NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); 513 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
549 514
550 /* no fb bound */ 515 /* no fb bound */
551 if (!atomic && !crtc->fb) { 516 if (!atomic && !crtc->fb) {
552 NV_DEBUG_KMS(dev, "No FB bound\n"); 517 NV_DEBUG(drm, "No FB bound\n");
553 return 0; 518 return 0;
554 } 519 }
555 520
@@ -579,7 +544,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
579 nv_crtc->fb.offset = fb->nvbo->bo.offset; 544 nv_crtc->fb.offset = fb->nvbo->bo.offset;
580 nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); 545 nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
581 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; 546 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
582 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { 547 if (!nv_crtc->fb.blanked && nv_device(drm->device)->chipset != 0x50) {
583 ret = RING_SPACE(evo, 2); 548 ret = RING_SPACE(evo, 2);
584 if (ret) 549 if (ret)
585 return ret; 550 return ret;
@@ -737,10 +702,11 @@ static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
737int 702int
738nv50_crtc_create(struct drm_device *dev, int index) 703nv50_crtc_create(struct drm_device *dev, int index)
739{ 704{
705 struct nouveau_drm *drm = nouveau_drm(dev);
740 struct nouveau_crtc *nv_crtc = NULL; 706 struct nouveau_crtc *nv_crtc = NULL;
741 int ret, i; 707 int ret, i;
742 708
743 NV_DEBUG_KMS(dev, "\n"); 709 NV_DEBUG(drm, "\n");
744 710
745 nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); 711 nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
746 if (!nv_crtc) 712 if (!nv_crtc)
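
Across this file (and the rest of the series) the same mechanical conversion repeats: drm_nouveau_private fished out of dev->dev_private gives way to accessor functions, register access takes a nouveau_device instead of the drm_device, and the KMS-specific debug macro folds into NV_DEBUG against the new nouveau_drm object. A condensed before/after sketch of the pattern, using only identifiers that appear in the hunks above:

	/* old style (removed) */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	if (dev_priv->chipset != 0x50) { /* ... */ }
	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
	nv_rd32(dev, reg);

	/* new style (added) */
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_device *device = nouveau_dev(dev);
	if (nv_device(drm->device)->chipset != 0x50) { /* ... */ }
	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
	nv_rd32(device, reg);
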
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index b290b7b1f65d..223da113ceee 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -26,9 +26,8 @@
26 26
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28 28
29#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) 29#include "nouveau_drm.h"
30#include "nouveau_reg.h" 30#include "nouveau_dma.h"
31#include "nouveau_drv.h"
32#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
33#include "nv50_display.h" 32#include "nv50_display.h"
34 33
@@ -36,22 +35,22 @@ static void
36nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update) 35nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
37{ 36{
38 struct drm_device *dev = nv_crtc->base.dev; 37 struct drm_device *dev = nv_crtc->base.dev;
39 struct drm_nouveau_private *dev_priv = dev->dev_private; 38 struct nouveau_drm *drm = nouveau_drm(dev);
40 struct nouveau_channel *evo = nv50_display(dev)->master; 39 struct nouveau_channel *evo = nv50_display(dev)->master;
41 int ret; 40 int ret;
42 41
43 NV_DEBUG_KMS(dev, "\n"); 42 NV_DEBUG(drm, "\n");
44 43
45 if (update && nv_crtc->cursor.visible) 44 if (update && nv_crtc->cursor.visible)
46 return; 45 return;
47 46
48 ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2); 47 ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
49 if (ret) { 48 if (ret) {
50 NV_ERROR(dev, "no space while unhiding cursor\n"); 49 NV_ERROR(drm, "no space while unhiding cursor\n");
51 return; 50 return;
52 } 51 }
53 52
54 if (dev_priv->chipset != 0x50) { 53 if (nv_device(drm->device)->chipset != 0x50) {
55 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1); 54 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
56 OUT_RING(evo, NvEvoVRAM); 55 OUT_RING(evo, NvEvoVRAM);
57 } 56 }
@@ -71,24 +70,24 @@ static void
71nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update) 70nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
72{ 71{
73 struct drm_device *dev = nv_crtc->base.dev; 72 struct drm_device *dev = nv_crtc->base.dev;
74 struct drm_nouveau_private *dev_priv = dev->dev_private; 73 struct nouveau_drm *drm = nouveau_drm(dev);
75 struct nouveau_channel *evo = nv50_display(dev)->master; 74 struct nouveau_channel *evo = nv50_display(dev)->master;
76 int ret; 75 int ret;
77 76
78 NV_DEBUG_KMS(dev, "\n"); 77 NV_DEBUG(drm, "\n");
79 78
80 if (update && !nv_crtc->cursor.visible) 79 if (update && !nv_crtc->cursor.visible)
81 return; 80 return;
82 81
83 ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2); 82 ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
84 if (ret) { 83 if (ret) {
85 NV_ERROR(dev, "no space while hiding cursor\n"); 84 NV_ERROR(drm, "no space while hiding cursor\n");
86 return; 85 return;
87 } 86 }
88 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2); 87 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
89 OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE); 88 OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
90 OUT_RING(evo, 0); 89 OUT_RING(evo, 0);
91 if (dev_priv->chipset != 0x50) { 90 if (nv_device(drm->device)->chipset != 0x50) {
92 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1); 91 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
93 OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE); 92 OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
94 } 93 }
@@ -104,19 +103,18 @@ nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
104static void 103static void
105nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) 104nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
106{ 105{
107 struct drm_device *dev = nv_crtc->base.dev; 106 struct nouveau_device *device = nouveau_dev(nv_crtc->base.dev);
108 107
109 nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y; 108 nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
110 nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), 109 nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
111 ((y & 0xFFFF) << 16) | (x & 0xFFFF)); 110 ((y & 0xFFFF) << 16) | (x & 0xFFFF));
112 /* Needed to make the cursor move. */ 111 /* Needed to make the cursor move. */
113 nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0); 112 nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
114} 113}
115 114
116static void 115static void
117nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) 116nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
118{ 117{
119 NV_DEBUG_KMS(nv_crtc->base.dev, "\n");
120 if (offset == nv_crtc->cursor.offset) 118 if (offset == nv_crtc->cursor.offset)
121 return; 119 return;
122 120
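
nv50_cursor_set_pos, just above, packs both coordinates into a single 32-bit write — y in the high half, x in the low half, each masked to 16 bits — followed by a dummy write to the POS_CTRL register to make the hardware latch the new position. A fragment restating the layout (variable names as in the function; the signed-coordinate interpretation is an assumption about the hardware, not stated in the source):

	/* Cursor position register layout used above:
	 *   bits 31:16 = y, bits 15:0 = x; negative coordinates
	 *   presumably survive as 16-bit two's complement. */
	u32 pos = ((y & 0xFFFF) << 16) | (x & 0xFFFF);
	nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index), pos);
	/* dummy write needed to make the cursor actually move */
	nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
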
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 2bff2e588d87..6a30a1748573 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -29,18 +29,21 @@
29 29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) 30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h" 31#include "nouveau_reg.h"
32#include "nouveau_drv.h" 32#include "nouveau_drm.h"
33#include "nouveau_dma.h" 33#include "nouveau_dma.h"
34#include "nouveau_encoder.h" 34#include "nouveau_encoder.h"
35#include "nouveau_connector.h" 35#include "nouveau_connector.h"
36#include "nouveau_crtc.h" 36#include "nouveau_crtc.h"
37#include "nv50_display.h" 37#include "nv50_display.h"
38 38
39#include <subdev/timer.h>
40
39static void 41static void
40nv50_dac_disconnect(struct drm_encoder *encoder) 42nv50_dac_disconnect(struct drm_encoder *encoder)
41{ 43{
42 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 44 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
43 struct drm_device *dev = encoder->dev; 45 struct drm_device *dev = encoder->dev;
46 struct nouveau_drm *drm = nouveau_drm(dev);
44 struct nouveau_channel *evo = nv50_display(dev)->master; 47 struct nouveau_channel *evo = nv50_display(dev)->master;
45 int ret; 48 int ret;
46 49
@@ -48,11 +51,11 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
48 return; 51 return;
49 nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true); 52 nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
50 53
51 NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or); 54 NV_DEBUG(drm, "Disconnecting DAC %d\n", nv_encoder->or);
52 55
53 ret = RING_SPACE(evo, 4); 56 ret = RING_SPACE(evo, 4);
54 if (ret) { 57 if (ret) {
55 NV_ERROR(dev, "no space while disconnecting DAC\n"); 58 NV_ERROR(drm, "no space while disconnecting DAC\n");
56 return; 59 return;
57 } 60 }
58 BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1); 61 BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
@@ -67,43 +70,43 @@ static enum drm_connector_status
67nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) 70nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
68{ 71{
69 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 72 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
70 struct drm_device *dev = encoder->dev; 73 struct nouveau_device *device = nouveau_dev(encoder->dev);
71 struct drm_nouveau_private *dev_priv = dev->dev_private; 74 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
72 enum drm_connector_status status = connector_status_disconnected; 75 enum drm_connector_status status = connector_status_disconnected;
73 uint32_t dpms_state, load_pattern, load_state; 76 uint32_t dpms_state, load_pattern, load_state;
74 int or = nv_encoder->or; 77 int or = nv_encoder->or;
75 78
76 nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001); 79 nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
77 dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)); 80 dpms_state = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
78 81
79 nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), 82 nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
80 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); 83 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
81 if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), 84 if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
82 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { 85 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
83 NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); 86 NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
84 NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, 87 NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
85 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or))); 88 nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
86 return status; 89 return status;
87 } 90 }
88 91
89 /* Use bios provided value if possible. */ 92 /* Use bios provided value if possible. */
90 if (dev_priv->vbios.dactestval) { 93 if (drm->vbios.dactestval) {
91 load_pattern = dev_priv->vbios.dactestval; 94 load_pattern = drm->vbios.dactestval;
92 NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n", 95 NV_DEBUG(drm, "Using bios provided load_pattern of %d\n",
93 load_pattern); 96 load_pattern);
94 } else { 97 } else {
95 load_pattern = 340; 98 load_pattern = 340;
96 NV_DEBUG_KMS(dev, "Using default load_pattern of %d\n", 99 NV_DEBUG(drm, "Using default load_pattern of %d\n",
97 load_pattern); 100 load_pattern);
98 } 101 }
99 102
100 nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 103 nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
101 NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern); 104 NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
102 mdelay(45); /* give it some time to process */ 105 mdelay(45); /* give it some time to process */
103 load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or)); 106 load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
104 107
105 nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0); 108 nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
106 nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state | 109 nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
107 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); 110 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
108 111
109 if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) == 112 if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
@@ -111,9 +114,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
111 status = connector_status_connected; 114 status = connector_status_connected;
112 115
113 if (status == connector_status_connected) 116 if (status == connector_status_connected)
114 NV_DEBUG_KMS(dev, "Load was detected on output with or %d\n", or); 117 NV_DEBUG(drm, "Load was detected on output with or %d\n", or);
115 else 118 else
116 NV_DEBUG_KMS(dev, "Load was not detected on output with or %d\n", or); 119 NV_DEBUG(drm, "Load was not detected on output with or %d\n", or);
117 120
118 return status; 121 return status;
119} 122}
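
The detect path above is classic VGA load sensing: save the DPMS state, drive a test level (the BIOS-provided dactestval when present, otherwise a default of 340) with the LOAD_CTRL_ACTIVE bit set, give the sense circuit roughly 45 ms, then check the PRESENT bits in the readback. A condensed sketch of that sequence, assuming the local variables (or, load_state, status) from the hunk:

	/* condensed load-sense sequence, per nv50_dac_detect above */
	u32 pattern = drm->vbios.dactestval ? drm->vbios.dactestval : 340;

	nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
		NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | pattern);
	mdelay(45);	/* let the sense circuit settle */
	load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
	nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);	/* stop driving */

	if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
	    NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
		status = connector_status_connected;
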
@@ -121,23 +124,24 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
121static void 124static void
122nv50_dac_dpms(struct drm_encoder *encoder, int mode) 125nv50_dac_dpms(struct drm_encoder *encoder, int mode)
123{ 126{
124 struct drm_device *dev = encoder->dev; 127 struct nouveau_device *device = nouveau_dev(encoder->dev);
128 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
125 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 129 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
126 uint32_t val; 130 uint32_t val;
127 int or = nv_encoder->or; 131 int or = nv_encoder->or;
128 132
129 NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); 133 NV_DEBUG(drm, "or %d mode %d\n", or, mode);
130 134
131 /* wait for it to be done */ 135 /* wait for it to be done */
132 if (!nv_wait(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), 136 if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
133 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) { 137 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
134 NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or); 138 NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
135 NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or, 139 NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
136 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or))); 140 nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
137 return; 141 return;
138 } 142 }
139 143
140 val = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F; 144 val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
141 145
142 if (mode != DRM_MODE_DPMS_ON) 146 if (mode != DRM_MODE_DPMS_ON)
143 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED; 147 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
@@ -158,20 +162,22 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
158 break; 162 break;
159 } 163 }
160 164
161 nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val | 165 nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
162 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); 166 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
163} 167}
164 168
165static void 169static void
166nv50_dac_save(struct drm_encoder *encoder) 170nv50_dac_save(struct drm_encoder *encoder)
167{ 171{
168 NV_ERROR(encoder->dev, "!!\n"); 172 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
173 NV_ERROR(drm, "!!\n");
169} 174}
170 175
171static void 176static void
172nv50_dac_restore(struct drm_encoder *encoder) 177nv50_dac_restore(struct drm_encoder *encoder)
173{ 178{
174 NV_ERROR(encoder->dev, "!!\n"); 179 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
180 NV_ERROR(drm, "!!\n");
175} 181}
176 182
177static bool 183static bool
@@ -179,14 +185,15 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder,
179 const struct drm_display_mode *mode, 185 const struct drm_display_mode *mode,
180 struct drm_display_mode *adjusted_mode) 186 struct drm_display_mode *adjusted_mode)
181{ 187{
188 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
182 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 189 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
183 struct nouveau_connector *connector; 190 struct nouveau_connector *connector;
184 191
185 NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or); 192 NV_DEBUG(drm, "or %d\n", nv_encoder->or);
186 193
187 connector = nouveau_encoder_connector_get(nv_encoder); 194 connector = nouveau_encoder_connector_get(nv_encoder);
188 if (!connector) { 195 if (!connector) {
189 NV_ERROR(encoder->dev, "Encoder has no connector\n"); 196 NV_ERROR(drm, "Encoder has no connector\n");
190 return false; 197 return false;
191 } 198 }
192 199
@@ -207,13 +214,14 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
207 struct drm_display_mode *adjusted_mode) 214 struct drm_display_mode *adjusted_mode)
208{ 215{
209 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 216 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
217 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
210 struct drm_device *dev = encoder->dev; 218 struct drm_device *dev = encoder->dev;
211 struct nouveau_channel *evo = nv50_display(dev)->master; 219 struct nouveau_channel *evo = nv50_display(dev)->master;
212 struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc); 220 struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
213 uint32_t mode_ctl = 0, mode_ctl2 = 0; 221 uint32_t mode_ctl = 0, mode_ctl2 = 0;
214 int ret; 222 int ret;
215 223
216 NV_DEBUG_KMS(dev, "or %d type %d crtc %d\n", 224 NV_DEBUG(drm, "or %d type %d crtc %d\n",
217 nv_encoder->or, nv_encoder->dcb->type, crtc->index); 225 nv_encoder->or, nv_encoder->dcb->type, crtc->index);
218 226
219 nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); 227 nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -224,10 +232,10 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
224 mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0; 232 mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
225 233
226 /* Lacking a working tv-out, this is not a 100% sure. */ 234 /* Lacking a working tv-out, this is not a 100% sure. */
227 if (nv_encoder->dcb->type == OUTPUT_ANALOG) 235 if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
228 mode_ctl |= 0x40; 236 mode_ctl |= 0x40;
229 else 237 else
230 if (nv_encoder->dcb->type == OUTPUT_TV) 238 if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
231 mode_ctl |= 0x100; 239 mode_ctl |= 0x100;
232 240
233 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 241 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -238,7 +246,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
238 246
239 ret = RING_SPACE(evo, 3); 247 ret = RING_SPACE(evo, 3);
240 if (ret) { 248 if (ret) {
241 NV_ERROR(dev, "no space while connecting DAC\n"); 249 NV_ERROR(drm, "no space while connecting DAC\n");
242 return; 250 return;
243 } 251 }
244 BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2); 252 BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
@@ -271,11 +279,12 @@ static void
271nv50_dac_destroy(struct drm_encoder *encoder) 279nv50_dac_destroy(struct drm_encoder *encoder)
272{ 280{
273 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 281 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
282 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
274 283
275 if (!encoder) 284 if (!encoder)
276 return; 285 return;
277 286
278 NV_DEBUG_KMS(encoder->dev, "\n"); 287 NV_DEBUG(drm, "\n");
279 288
280 drm_encoder_cleanup(encoder); 289 drm_encoder_cleanup(encoder);
281 kfree(nv_encoder); 290 kfree(nv_encoder);
@@ -286,7 +295,7 @@ static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
286}; 295};
287 296
288int 297int
289nv50_dac_create(struct drm_connector *connector, struct dcb_entry *entry) 298nv50_dac_create(struct drm_connector *connector, struct dcb_output *entry)
290{ 299{
291 struct nouveau_encoder *nv_encoder; 300 struct nouveau_encoder *nv_encoder;
292 struct drm_encoder *encoder; 301 struct drm_encoder *encoder;
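
Note also the interface renames folded into this file: dcb_entry becomes dcb_output, and the OUTPUT_* type constants grow a DCB_ prefix (DCB_OUTPUT_ANALOG, DCB_OUTPUT_TV, and so on). The DPMS path shows the handshake every DAC control write must follow — wait for the PENDING bit to clear, read-modify, then write back with PENDING set to kick the state machine. Sketched from nv50_dac_dpms above:

	/* DPMS_CTRL handshake, per nv50_dac_dpms above */
	if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0))
		return;		/* previous request never completed */

	val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
	/* ... set BLANKED/OFF bits for the requested mode ... */
	nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
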
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f868a13e5c2d..f97b42cbb6bb 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -24,28 +24,30 @@
24 * 24 *
25 */ 25 */
26 26
27#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) 27#include "nouveau_drm.h"
28#include "nouveau_dma.h"
29
28#include "nv50_display.h" 30#include "nv50_display.h"
29#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
30#include "nouveau_encoder.h" 32#include "nouveau_encoder.h"
31#include "nouveau_connector.h" 33#include "nouveau_connector.h"
32#include "nouveau_fb.h"
33#include "nouveau_fbcon.h" 34#include "nouveau_fbcon.h"
34#include "nouveau_ramht.h"
35#include "nouveau_software.h"
36#include <drm/drm_crtc_helper.h> 35#include <drm/drm_crtc_helper.h>
36#include "nouveau_fence.h"
37
38#include <core/gpuobj.h>
39#include <subdev/timer.h>
37 40
38static void nv50_display_isr(struct drm_device *);
39static void nv50_display_bh(unsigned long); 41static void nv50_display_bh(unsigned long);
40 42
41static inline int 43static inline int
42nv50_sor_nr(struct drm_device *dev) 44nv50_sor_nr(struct drm_device *dev)
43{ 45{
44 struct drm_nouveau_private *dev_priv = dev->dev_private; 46 struct nouveau_device *device = nouveau_dev(dev);
45 47
46 if (dev_priv->chipset < 0x90 || 48 if (device->chipset < 0x90 ||
47 dev_priv->chipset == 0x92 || 49 device->chipset == 0x92 ||
48 dev_priv->chipset == 0xa0) 50 device->chipset == 0xa0)
49 return 2; 51 return 2;
50 52
51 return 4; 53 return 4;
@@ -54,73 +56,29 @@ nv50_sor_nr(struct drm_device *dev)
54u32 56u32
55nv50_display_active_crtcs(struct drm_device *dev) 57nv50_display_active_crtcs(struct drm_device *dev)
56{ 58{
57 struct drm_nouveau_private *dev_priv = dev->dev_private; 59 struct nouveau_device *device = nouveau_dev(dev);
58 u32 mask = 0; 60 u32 mask = 0;
59 int i; 61 int i;
60 62
61 if (dev_priv->chipset < 0x90 || 63 if (device->chipset < 0x90 ||
62 dev_priv->chipset == 0x92 || 64 device->chipset == 0x92 ||
63 dev_priv->chipset == 0xa0) { 65 device->chipset == 0xa0) {
64 for (i = 0; i < 2; i++) 66 for (i = 0; i < 2; i++)
65 mask |= nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i)); 67 mask |= nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
66 } else { 68 } else {
67 for (i = 0; i < 4; i++) 69 for (i = 0; i < 4; i++)
68 mask |= nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i)); 70 mask |= nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
69 } 71 }
70 72
71 for (i = 0; i < 3; i++) 73 for (i = 0; i < 3; i++)
72 mask |= nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); 74 mask |= nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
73 75
74 return mask & 3; 76 return mask & 3;
75} 77}
76 78
77static int
78evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data)
79{
80 int ret = 0;
81 nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000001);
82 nv_wr32(dev, 0x610304 + (ch * 0x08), data);
83 nv_wr32(dev, 0x610300 + (ch * 0x08), 0x80000001 | mthd);
84 if (!nv_wait(dev, 0x610300 + (ch * 0x08), 0x80000000, 0x00000000))
85 ret = -EBUSY;
86 if (ret || (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO))
87 NV_INFO(dev, "EvoPIO: %d 0x%04x 0x%08x\n", ch, mthd, data);
88 nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000000);
89 return ret;
90}
91
92int 79int
93nv50_display_early_init(struct drm_device *dev) 80nv50_display_early_init(struct drm_device *dev)
94{ 81{
95 u32 ctrl = nv_rd32(dev, 0x610200);
96 int i;
97
98 /* check if master evo channel is already active, a good a sign as any
99 * that the display engine is in a weird state (hibernate/kexec), if
100 * it is, do our best to reset the display engine...
101 */
102 if ((ctrl & 0x00000003) == 0x00000003) {
103 NV_INFO(dev, "PDISP: EVO(0) 0x%08x, resetting...\n", ctrl);
104
105 /* deactivate both heads first, PDISP will disappear forever
106 * (well, until you power cycle) on some boards as soon as
107 * PMC_ENABLE is hit unless they are..
108 */
109 for (i = 0; i < 2; i++) {
110 evo_icmd(dev, 0, 0x0880 + (i * 0x400), 0x05000000);
111 evo_icmd(dev, 0, 0x089c + (i * 0x400), 0);
112 evo_icmd(dev, 0, 0x0840 + (i * 0x400), 0);
113 evo_icmd(dev, 0, 0x0844 + (i * 0x400), 0);
114 evo_icmd(dev, 0, 0x085c + (i * 0x400), 0);
115 evo_icmd(dev, 0, 0x0874 + (i * 0x400), 0);
116 }
117 evo_icmd(dev, 0, 0x0080, 0);
118
119 /* reset PDISP */
120 nv_mask(dev, 0x000200, 0x40000000, 0x00000000);
121 nv_mask(dev, 0x000200, 0x40000000, 0x40000000);
122 }
123
124 return 0; 82 return 0;
125} 83}
126 84
@@ -132,11 +90,8 @@ nv50_display_late_takedown(struct drm_device *dev)
132int 90int
133nv50_display_sync(struct drm_device *dev) 91nv50_display_sync(struct drm_device *dev)
134{ 92{
135 struct drm_nouveau_private *dev_priv = dev->dev_private;
136 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
137 struct nv50_display *disp = nv50_display(dev); 93 struct nv50_display *disp = nv50_display(dev);
138 struct nouveau_channel *evo = disp->master; 94 struct nouveau_channel *evo = disp->master;
139 u64 start;
140 int ret; 95 int ret;
141 96
142 ret = RING_SPACE(evo, 6); 97 ret = RING_SPACE(evo, 6);
@@ -148,29 +103,28 @@ nv50_display_sync(struct drm_device *dev)
148 BEGIN_NV04(evo, 0, 0x0084, 1); 103 BEGIN_NV04(evo, 0, 0x0084, 1);
149 OUT_RING (evo, 0x00000000); 104 OUT_RING (evo, 0x00000000);
150 105
151 nv_wo32(disp->ntfy, 0x000, 0x00000000); 106 nv_wo32(disp->ramin, 0x2000, 0x00000000);
152 FIRE_RING (evo); 107 FIRE_RING (evo);
153 108
154 start = ptimer->read(dev); 109 if (nv_wait_ne(disp->ramin, 0x2000, 0xffffffff, 0x00000000))
155 do { 110 return 0;
156 if (nv_ro32(disp->ntfy, 0x000))
157 return 0;
158 } while (ptimer->read(dev) - start < 2000000000ULL);
159 } 111 }
160 112
161 return -EBUSY; 113 return 0;
162} 114}
163 115
164int 116int
165nv50_display_init(struct drm_device *dev) 117nv50_display_init(struct drm_device *dev)
166{ 118{
119 struct nouveau_drm *drm = nouveau_drm(dev);
120 struct nouveau_device *device = nouveau_dev(dev);
167 struct nouveau_channel *evo; 121 struct nouveau_channel *evo;
168 int ret, i; 122 int ret, i;
169 u32 val; 123 u32 val;
170 124
171 NV_DEBUG_KMS(dev, "\n"); 125 NV_DEBUG(drm, "\n");
172 126
173 nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004)); 127 nv_wr32(device, 0x00610184, nv_rd32(device, 0x00614004));
174 128
175 /* 129 /*
176 * I think the 0x006101XX range is some kind of main control area 130 * I think the 0x006101XX range is some kind of main control area
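
In the nv50_display_sync() hunk above, a hand-rolled two-second poll of a notifier dword via the PTIMER engine is replaced by the generic nv_wait_ne() helper watching offset 0x2000 of the display's ramin object; note the timeout case now falls through to return 0 rather than -EBUSY. For comparison, the removed open-coded wait (the 2000000000ULL budget is two seconds expressed in nanoseconds):

	/* the removed open-coded notifier wait */
	start = ptimer->read(dev);
	do {
		if (nv_ro32(disp->ntfy, 0x000))		/* notifier fired */
			return 0;
	} while (ptimer->read(dev) - start < 2000000000ULL);
	return -EBUSY;
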
@@ -178,82 +132,82 @@ nv50_display_init(struct drm_device *dev)
178 */ 132 */
179 /* CRTC? */ 133 /* CRTC? */
180 for (i = 0; i < 2; i++) { 134 for (i = 0; i < 2; i++) {
181 val = nv_rd32(dev, 0x00616100 + (i * 0x800)); 135 val = nv_rd32(device, 0x00616100 + (i * 0x800));
182 nv_wr32(dev, 0x00610190 + (i * 0x10), val); 136 nv_wr32(device, 0x00610190 + (i * 0x10), val);
183 val = nv_rd32(dev, 0x00616104 + (i * 0x800)); 137 val = nv_rd32(device, 0x00616104 + (i * 0x800));
184 nv_wr32(dev, 0x00610194 + (i * 0x10), val); 138 nv_wr32(device, 0x00610194 + (i * 0x10), val);
185 val = nv_rd32(dev, 0x00616108 + (i * 0x800)); 139 val = nv_rd32(device, 0x00616108 + (i * 0x800));
186 nv_wr32(dev, 0x00610198 + (i * 0x10), val); 140 nv_wr32(device, 0x00610198 + (i * 0x10), val);
187 val = nv_rd32(dev, 0x0061610c + (i * 0x800)); 141 val = nv_rd32(device, 0x0061610c + (i * 0x800));
188 nv_wr32(dev, 0x0061019c + (i * 0x10), val); 142 nv_wr32(device, 0x0061019c + (i * 0x10), val);
189 } 143 }
190 144
191 /* DAC */ 145 /* DAC */
192 for (i = 0; i < 3; i++) { 146 for (i = 0; i < 3; i++) {
193 val = nv_rd32(dev, 0x0061a000 + (i * 0x800)); 147 val = nv_rd32(device, 0x0061a000 + (i * 0x800));
194 nv_wr32(dev, 0x006101d0 + (i * 0x04), val); 148 nv_wr32(device, 0x006101d0 + (i * 0x04), val);
195 } 149 }
196 150
197 /* SOR */ 151 /* SOR */
198 for (i = 0; i < nv50_sor_nr(dev); i++) { 152 for (i = 0; i < nv50_sor_nr(dev); i++) {
199 val = nv_rd32(dev, 0x0061c000 + (i * 0x800)); 153 val = nv_rd32(device, 0x0061c000 + (i * 0x800));
200 nv_wr32(dev, 0x006101e0 + (i * 0x04), val); 154 nv_wr32(device, 0x006101e0 + (i * 0x04), val);
201 } 155 }
202 156
203 /* EXT */ 157 /* EXT */
204 for (i = 0; i < 3; i++) { 158 for (i = 0; i < 3; i++) {
205 val = nv_rd32(dev, 0x0061e000 + (i * 0x800)); 159 val = nv_rd32(device, 0x0061e000 + (i * 0x800));
206 nv_wr32(dev, 0x006101f0 + (i * 0x04), val); 160 nv_wr32(device, 0x006101f0 + (i * 0x04), val);
207 } 161 }
208 162
209 for (i = 0; i < 3; i++) { 163 for (i = 0; i < 3; i++) {
210 nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 | 164 nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
211 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING); 165 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
212 nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001); 166 nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
213 } 167 }
214 168
215 /* The precise purpose is unknown, i suspect it has something to do 169 /* The precise purpose is unknown, i suspect it has something to do
216 * with text mode. 170 * with text mode.
217 */ 171 */
218 if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) { 172 if (nv_rd32(device, NV50_PDISPLAY_INTR_1) & 0x100) {
219 nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100); 173 nv_wr32(device, NV50_PDISPLAY_INTR_1, 0x100);
220 nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1); 174 nv_wr32(device, 0x006194e8, nv_rd32(device, 0x006194e8) & ~1);
221 if (!nv_wait(dev, 0x006194e8, 2, 0)) { 175 if (!nv_wait(device, 0x006194e8, 2, 0)) {
222 NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n"); 176 NV_ERROR(drm, "timeout: (0x6194e8 & 2) != 0\n");
223 NV_ERROR(dev, "0x6194e8 = 0x%08x\n", 177 NV_ERROR(drm, "0x6194e8 = 0x%08x\n",
224 nv_rd32(dev, 0x6194e8)); 178 nv_rd32(device, 0x6194e8));
225 return -EBUSY; 179 return -EBUSY;
226 } 180 }
227 } 181 }
228 182
229 for (i = 0; i < 2; i++) { 183 for (i = 0; i < 2; i++) {
230 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); 184 nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
231 if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 185 if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
232 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { 186 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
233 NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n"); 187 NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
234 NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n", 188 NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
235 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); 189 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
236 return -EBUSY; 190 return -EBUSY;
237 } 191 }
238 192
239 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 193 nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
240 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON); 194 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
241 if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 195 if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
242 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 196 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
243 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) { 197 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
244 NV_ERROR(dev, "timeout: " 198 NV_ERROR(drm, "timeout: "
245 "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i); 199 "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
246 NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i, 200 NV_ERROR(drm, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
247 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); 201 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
248 return -EBUSY; 202 return -EBUSY;
249 } 203 }
250 } 204 }
251 205
252 nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000); 206 nv_wr32(device, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
253 nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000); 207 nv_mask(device, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
254 nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000); 208 nv_wr32(device, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
255 nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000); 209 nv_mask(device, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
256 nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 210 nv_wr32(device, NV50_PDISPLAY_INTR_EN_1,
257 NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 | 211 NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
258 NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 | 212 NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
259 NV50_PDISPLAY_INTR_EN_1_CLK_UNK40); 213 NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
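
Display bring-up above is a chain of guarded register pokes: shadow the per-CRTC/DAC/SOR/EXT configuration into the 0x6101xx control window, clear the leftover text-mode flag, then cycle each cursor channel through CTRL2 while polling its STATUS field. Every poll goes through nv_wait(), which throughout this series takes the (device, reg, mask, value) form and returns false on timeout, so each call is error-checked; the recurring shape, lifted from the hunk:

	/* nv_wait() poll pattern used throughout the init path */
	nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
	if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
		     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
		NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
		return -EBUSY;
	}
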
@@ -263,7 +217,7 @@ nv50_display_init(struct drm_device *dev)
263 return ret; 217 return ret;
264 evo = nv50_display(dev)->master; 218 evo = nv50_display(dev)->master;
265 219
266 nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); 220 nv_wr32(device, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9);
267 221
268 ret = RING_SPACE(evo, 3); 222 ret = RING_SPACE(evo, 3);
269 if (ret) 223 if (ret)
@@ -278,12 +232,14 @@ nv50_display_init(struct drm_device *dev)
278void 232void
279nv50_display_fini(struct drm_device *dev) 233nv50_display_fini(struct drm_device *dev)
280{ 234{
235 struct nouveau_drm *drm = nouveau_drm(dev);
236 struct nouveau_device *device = nouveau_dev(dev);
281 struct nv50_display *disp = nv50_display(dev); 237 struct nv50_display *disp = nv50_display(dev);
282 struct nouveau_channel *evo = disp->master; 238 struct nouveau_channel *evo = disp->master;
283 struct drm_crtc *drm_crtc; 239 struct drm_crtc *drm_crtc;
284 int ret, i; 240 int ret, i;
285 241
286 NV_DEBUG_KMS(dev, "\n"); 242 NV_DEBUG(drm, "\n");
287 243
288 list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) { 244 list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
289 struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc); 245 struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
@@ -308,55 +264,59 @@ nv50_display_fini(struct drm_device *dev)
308 if (!crtc->base.enabled) 264 if (!crtc->base.enabled)
309 continue; 265 continue;
310 266
311 nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask); 267 nv_wr32(device, NV50_PDISPLAY_INTR_1, mask);
312 if (!nv_wait(dev, NV50_PDISPLAY_INTR_1, mask, mask)) { 268 if (!nv_wait(device, NV50_PDISPLAY_INTR_1, mask, mask)) {
313 NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == " 269 NV_ERROR(drm, "timeout: (0x610024 & 0x%08x) == "
314 "0x%08x\n", mask, mask); 270 "0x%08x\n", mask, mask);
315 NV_ERROR(dev, "0x610024 = 0x%08x\n", 271 NV_ERROR(drm, "0x610024 = 0x%08x\n",
316 nv_rd32(dev, NV50_PDISPLAY_INTR_1)); 272 nv_rd32(device, NV50_PDISPLAY_INTR_1));
317 } 273 }
318 } 274 }
319 275
320 for (i = 0; i < 2; i++) { 276 for (i = 0; i < 2; i++) {
321 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0); 277 nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
322 if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 278 if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
323 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { 279 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
324 NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n"); 280 NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
325 NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n", 281 NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
326 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); 282 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
327 } 283 }
328 } 284 }
329 285
330 nv50_evo_fini(dev); 286 nv50_evo_fini(dev);
331 287
332 for (i = 0; i < 3; i++) { 288 for (i = 0; i < 3; i++) {
333 if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i), 289 if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(i),
334 NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { 290 NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
335 NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i); 291 NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
336 NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i, 292 NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
337 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i))); 293 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
338 } 294 }
339 } 295 }
340 296
341 /* disable interrupts. */ 297 /* disable interrupts. */
342 nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000); 298 nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
343} 299}
344 300
345int 301int
346nv50_display_create(struct drm_device *dev) 302nv50_display_create(struct drm_device *dev)
347{ 303{
348 struct drm_nouveau_private *dev_priv = dev->dev_private; 304 struct nouveau_drm *drm = nouveau_drm(dev);
349 struct dcb_table *dcb = &dev_priv->vbios.dcb; 305 struct dcb_table *dcb = &drm->vbios.dcb;
350 struct drm_connector *connector, *ct; 306 struct drm_connector *connector, *ct;
351 struct nv50_display *priv; 307 struct nv50_display *priv;
352 int ret, i; 308 int ret, i;
353 309
354 NV_DEBUG_KMS(dev, "\n"); 310 NV_DEBUG(drm, "\n");
355 311
356 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 312 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
357 if (!priv) 313 if (!priv)
358 return -ENOMEM; 314 return -ENOMEM;
359 dev_priv->engine.display.priv = priv; 315
316 nouveau_display(dev)->priv = priv;
317 nouveau_display(dev)->dtor = nv50_display_destroy;
318 nouveau_display(dev)->init = nv50_display_init;
319 nouveau_display(dev)->fini = nv50_display_fini;
360 320
361 /* Create CRTC objects */ 321 /* Create CRTC objects */
362 for (i = 0; i < 2; i++) { 322 for (i = 0; i < 2; i++) {
@@ -367,10 +327,10 @@ nv50_display_create(struct drm_device *dev)
367 327
368 /* We setup the encoders from the BIOS table */ 328 /* We setup the encoders from the BIOS table */
369 for (i = 0 ; i < dcb->entries; i++) { 329 for (i = 0 ; i < dcb->entries; i++) {
370 struct dcb_entry *entry = &dcb->entry[i]; 330 struct dcb_output *entry = &dcb->entry[i];
371 331
372 if (entry->location != DCB_LOC_ON_CHIP) { 332 if (entry->location != DCB_LOC_ON_CHIP) {
373 NV_WARN(dev, "Off-chip encoder %d/%d unsupported\n", 333 NV_WARN(drm, "Off-chip encoder %d/%d unsupported\n",
374 entry->type, ffs(entry->or) - 1); 334 entry->type, ffs(entry->or) - 1);
375 continue; 335 continue;
376 } 336 }
@@ -380,16 +340,16 @@ nv50_display_create(struct drm_device *dev)
380 continue; 340 continue;
381 341
382 switch (entry->type) { 342 switch (entry->type) {
383 case OUTPUT_TMDS: 343 case DCB_OUTPUT_TMDS:
384 case OUTPUT_LVDS: 344 case DCB_OUTPUT_LVDS:
385 case OUTPUT_DP: 345 case DCB_OUTPUT_DP:
386 nv50_sor_create(connector, entry); 346 nv50_sor_create(connector, entry);
387 break; 347 break;
388 case OUTPUT_ANALOG: 348 case DCB_OUTPUT_ANALOG:
389 nv50_dac_create(connector, entry); 349 nv50_dac_create(connector, entry);
390 break; 350 break;
391 default: 351 default:
392 NV_WARN(dev, "DCB encoder %d unknown\n", entry->type); 352 NV_WARN(drm, "DCB encoder %d unknown\n", entry->type);
393 continue; 353 continue;
394 } 354 }
395 } 355 }
@@ -397,14 +357,13 @@ nv50_display_create(struct drm_device *dev)
397 list_for_each_entry_safe(connector, ct, 357 list_for_each_entry_safe(connector, ct,
398 &dev->mode_config.connector_list, head) { 358 &dev->mode_config.connector_list, head) {
399 if (!connector->encoder_ids[0]) { 359 if (!connector->encoder_ids[0]) {
400 NV_WARN(dev, "%s has no encoders, removing\n", 360 NV_WARN(drm, "%s has no encoders, removing\n",
401 drm_get_connector_name(connector)); 361 drm_get_connector_name(connector));
402 connector->funcs->destroy(connector); 362 connector->funcs->destroy(connector);
403 } 363 }
404 } 364 }
405 365
406 tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev); 366 tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
407 nouveau_irq_register(dev, 26, nv50_display_isr);
408 367
409 ret = nv50_evo_create(dev); 368 ret = nv50_evo_create(dev);
410 if (ret) { 369 if (ret) {
@@ -420,13 +379,16 @@ nv50_display_destroy(struct drm_device *dev)
420{ 379{
421 struct nv50_display *disp = nv50_display(dev); 380 struct nv50_display *disp = nv50_display(dev);
422 381
423 NV_DEBUG_KMS(dev, "\n");
424
425 nv50_evo_destroy(dev); 382 nv50_evo_destroy(dev);
426 nouveau_irq_unregister(dev, 26);
427 kfree(disp); 383 kfree(disp);
428} 384}
429 385
386struct nouveau_bo *
387nv50_display_crtc_sema(struct drm_device *dev, int crtc)
388{
389 return nv50_display(dev)->crtc[crtc].sem.bo;
390}
391
430void 392void
431nv50_display_flip_stop(struct drm_crtc *crtc) 393nv50_display_flip_stop(struct drm_crtc *crtc)
432{ 394{
@@ -457,7 +419,7 @@ int
457nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, 419nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
458 struct nouveau_channel *chan) 420 struct nouveau_channel *chan)
459{ 421{
460 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private; 422 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
461 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); 423 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
462 struct nv50_display *disp = nv50_display(crtc->dev); 424 struct nv50_display *disp = nv50_display(crtc->dev);
463 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 425 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -477,7 +439,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
477 return ret; 439 return ret;
478 } 440 }
479 441
480 if (dev_priv->chipset < 0xc0) { 442 if (nv_device(drm->device)->chipset < 0xc0) {
481 BEGIN_NV04(chan, 0, 0x0060, 2); 443 BEGIN_NV04(chan, 0, 0x0060, 2);
482 OUT_RING (chan, NvEvoSema0 + nv_crtc->index); 444 OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
483 OUT_RING (chan, dispc->sem.offset); 445 OUT_RING (chan, dispc->sem.offset);
@@ -487,12 +449,12 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
487 OUT_RING (chan, dispc->sem.offset ^ 0x10); 449 OUT_RING (chan, dispc->sem.offset ^ 0x10);
488 OUT_RING (chan, 0x74b1e000); 450 OUT_RING (chan, 0x74b1e000);
489 BEGIN_NV04(chan, 0, 0x0060, 1); 451 BEGIN_NV04(chan, 0, 0x0060, 1);
490 if (dev_priv->chipset < 0x84) 452 if (nv_device(drm->device)->chipset < 0x84)
491 OUT_RING (chan, NvSema); 453 OUT_RING (chan, NvSema);
492 else 454 else
493 OUT_RING (chan, chan->vram_handle); 455 OUT_RING (chan, chan->vram);
494 } else { 456 } else {
495 u64 offset = nvc0_software_crtc(chan, nv_crtc->index); 457 u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
496 offset += dispc->sem.offset; 458 offset += dispc->sem.offset;
497 BEGIN_NVC0(chan, 0, 0x0010, 4); 459 BEGIN_NVC0(chan, 0, 0x0010, 4);
498 OUT_RING (chan, upper_32_bits(offset)); 460 OUT_RING (chan, upper_32_bits(offset));
@@ -555,13 +517,13 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
555} 517}
556 518
557static u16 519static u16
558nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb, 520nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb,
559 u32 mc, int pxclk) 521 u32 mc, int pxclk)
560{ 522{
561 struct drm_nouveau_private *dev_priv = dev->dev_private; 523 struct nouveau_drm *drm = nouveau_drm(dev);
562 struct nouveau_connector *nv_connector = NULL; 524 struct nouveau_connector *nv_connector = NULL;
563 struct drm_encoder *encoder; 525 struct drm_encoder *encoder;
564 struct nvbios *bios = &dev_priv->vbios; 526 struct nvbios *bios = &drm->vbios;
565 u32 script = 0, or; 527 u32 script = 0, or;
566 528
567 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 529 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -576,7 +538,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
576 538
577 or = ffs(dcb->or) - 1; 539 or = ffs(dcb->or) - 1;
578 switch (dcb->type) { 540 switch (dcb->type) {
579 case OUTPUT_LVDS: 541 case DCB_OUTPUT_LVDS:
580 script = (mc >> 8) & 0xf; 542 script = (mc >> 8) & 0xf;
581 if (bios->fp_no_ddc) { 543 if (bios->fp_no_ddc) {
582 if (bios->fp.dual_link) 544 if (bios->fp.dual_link)
@@ -609,34 +571,20 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
609 (nv_connector->edid->input & 0x70) >= 0x20) 571 (nv_connector->edid->input & 0x70) >= 0x20)
610 script |= 0x0200; 572 script |= 0x0200;
611 } 573 }
612
613 if (nouveau_uscript_lvds >= 0) {
614 NV_INFO(dev, "override script 0x%04x with 0x%04x "
615 "for output LVDS-%d\n", script,
616 nouveau_uscript_lvds, or);
617 script = nouveau_uscript_lvds;
618 }
619 break; 574 break;
620 case OUTPUT_TMDS: 575 case DCB_OUTPUT_TMDS:
621 script = (mc >> 8) & 0xf; 576 script = (mc >> 8) & 0xf;
622 if (pxclk >= 165000) 577 if (pxclk >= 165000)
623 script |= 0x0100; 578 script |= 0x0100;
624
625 if (nouveau_uscript_tmds >= 0) {
626 NV_INFO(dev, "override script 0x%04x with 0x%04x "
627 "for output TMDS-%d\n", script,
628 nouveau_uscript_tmds, or);
629 script = nouveau_uscript_tmds;
630 }
631 break; 579 break;
632 case OUTPUT_DP: 580 case DCB_OUTPUT_DP:
633 script = (mc >> 8) & 0xf; 581 script = (mc >> 8) & 0xf;
634 break; 582 break;
635 case OUTPUT_ANALOG: 583 case DCB_OUTPUT_ANALOG:
636 script = 0xff; 584 script = 0xff;
637 break; 585 break;
638 default: 586 default:
639 NV_ERROR(dev, "modeset on unsupported output type!\n"); 587 NV_ERROR(drm, "modeset on unsupported output type!\n");
640 break; 588 break;
641 } 589 }
642 590
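
nv50_display_script_select(), in the hunk above, reduces each output's mode-control word to a BIOS script id: the low nibble of bits 15:8 of mc, plus type-specific flag bits, with the user-override module parameters (nouveau_uscript_lvds/tmds) dropped by this diff. For TMDS the dual-link cutoff is a 165 MHz pixel clock, pxclk being in kHz. A condensed sketch of the TMDS and analog cases:

	/* script id selection, per the hunk above (pxclk in kHz) */
	switch (dcb->type) {
	case DCB_OUTPUT_TMDS:
		script = (mc >> 8) & 0xf;
		if (pxclk >= 165000)
			script |= 0x0100;	/* dual-link */
		break;
	case DCB_OUTPUT_ANALOG:
		script = 0xff;
		break;
	}
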
@@ -644,59 +592,18 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
644} 592}
645 593
646static void 594static void
647nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
648{
649 struct drm_nouveau_private *dev_priv = dev->dev_private;
650 struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
651 struct nouveau_software_chan *pch, *tmp;
652
653 list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) {
654 if (pch->vblank.head != crtc)
655 continue;
656
657 spin_lock(&psw->peephole_lock);
658 nv_wr32(dev, 0x001704, pch->vblank.channel);
659 nv_wr32(dev, 0x001710, 0x80000000 | pch->vblank.ctxdma);
660 if (dev_priv->chipset == 0x50) {
661 nv_wr32(dev, 0x001570, pch->vblank.offset);
662 nv_wr32(dev, 0x001574, pch->vblank.value);
663 } else {
664 nv_wr32(dev, 0x060010, pch->vblank.offset);
665 nv_wr32(dev, 0x060014, pch->vblank.value);
666 }
667 spin_unlock(&psw->peephole_lock);
668
669 list_del(&pch->vblank.list);
670 drm_vblank_put(dev, crtc);
671 }
672
673 drm_handle_vblank(dev, crtc);
674}
675
676static void
677nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
678{
679 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
680 nv50_display_vblank_crtc_handler(dev, 0);
681
682 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
683 nv50_display_vblank_crtc_handler(dev, 1);
684
685 nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
686}
687
688static void
689nv50_display_unk10_handler(struct drm_device *dev) 595nv50_display_unk10_handler(struct drm_device *dev)
690{ 596{
691 struct drm_nouveau_private *dev_priv = dev->dev_private; 597 struct nouveau_device *device = nouveau_dev(dev);
598 struct nouveau_drm *drm = nouveau_drm(dev);
692 struct nv50_display *disp = nv50_display(dev); 599 struct nv50_display *disp = nv50_display(dev);
693 u32 unk30 = nv_rd32(dev, 0x610030), mc; 600 u32 unk30 = nv_rd32(device, 0x610030), mc;
694 int i, crtc, or = 0, type = OUTPUT_ANY; 601 int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
695 602
696 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); 603 NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
697 disp->irq.dcb = NULL; 604 disp->irq.dcb = NULL;
698 605
699 nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8); 606 nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) & ~8);
700 607
701 /* Determine which CRTC we're dealing with, only 1 ever will be 608 /* Determine which CRTC we're dealing with, only 1 ever will be
702 * signalled at the same time with the current nouveau code. 609 * signalled at the same time with the current nouveau code.
@@ -711,44 +618,44 @@ nv50_display_unk10_handler(struct drm_device *dev)
711 goto ack; 618 goto ack;
712 619
713 /* Find which encoder was connected to the CRTC */ 620 /* Find which encoder was connected to the CRTC */
714 for (i = 0; type == OUTPUT_ANY && i < 3; i++) { 621 for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
715 mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); 622 mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
716 NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc); 623 NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
717 if (!(mc & (1 << crtc))) 624 if (!(mc & (1 << crtc)))
718 continue; 625 continue;
719 626
720 switch ((mc & 0x00000f00) >> 8) { 627 switch ((mc & 0x00000f00) >> 8) {
721 case 0: type = OUTPUT_ANALOG; break; 628 case 0: type = DCB_OUTPUT_ANALOG; break;
722 case 1: type = OUTPUT_TV; break; 629 case 1: type = DCB_OUTPUT_TV; break;
723 default: 630 default:
724 NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc); 631 NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
725 goto ack; 632 goto ack;
726 } 633 }
727 634
728 or = i; 635 or = i;
729 } 636 }
730 637
731 for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) { 638 for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
732 if (dev_priv->chipset < 0x90 || 639 if (nv_device(drm->device)->chipset < 0x90 ||
733 dev_priv->chipset == 0x92 || 640 nv_device(drm->device)->chipset == 0x92 ||
734 dev_priv->chipset == 0xa0) 641 nv_device(drm->device)->chipset == 0xa0)
735 mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i)); 642 mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
736 else 643 else
737 mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i)); 644 mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
738 645
739 NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc); 646 NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
740 if (!(mc & (1 << crtc))) 647 if (!(mc & (1 << crtc)))
741 continue; 648 continue;
742 649
743 switch ((mc & 0x00000f00) >> 8) { 650 switch ((mc & 0x00000f00) >> 8) {
744 case 0: type = OUTPUT_LVDS; break; 651 case 0: type = DCB_OUTPUT_LVDS; break;
745 case 1: type = OUTPUT_TMDS; break; 652 case 1: type = DCB_OUTPUT_TMDS; break;
746 case 2: type = OUTPUT_TMDS; break; 653 case 2: type = DCB_OUTPUT_TMDS; break;
747 case 5: type = OUTPUT_TMDS; break; 654 case 5: type = DCB_OUTPUT_TMDS; break;
748 case 8: type = OUTPUT_DP; break; 655 case 8: type = DCB_OUTPUT_DP; break;
749 case 9: type = OUTPUT_DP; break; 656 case 9: type = DCB_OUTPUT_DP; break;
750 default: 657 default:
751 NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc); 658 NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
752 goto ack; 659 goto ack;
753 } 660 }
754 661
@@ -756,12 +663,12 @@ nv50_display_unk10_handler(struct drm_device *dev)
756 } 663 }
757 664
758 /* There was no encoder to disable */ 665 /* There was no encoder to disable */
759 if (type == OUTPUT_ANY) 666 if (type == DCB_OUTPUT_ANY)
760 goto ack; 667 goto ack;
761 668
762 /* Disable the encoder */ 669 /* Disable the encoder */
763 for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { 670 for (i = 0; i < drm->vbios.dcb.entries; i++) {
764 struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i]; 671 struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
765 672
766 if (dcb->type == type && (dcb->or & (1 << or))) { 673 if (dcb->type == type && (dcb->or & (1 << or))) {
767 nouveau_bios_run_display_table(dev, 0, -1, dcb, -1); 674 nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
@@ -770,22 +677,23 @@ nv50_display_unk10_handler(struct drm_device *dev)
770 } 677 }
771 } 678 }
772 679
773 NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc); 680 NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
774ack: 681ack:
775 nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10); 682 nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
776 nv_wr32(dev, 0x610030, 0x80000000); 683 nv_wr32(device, 0x610030, 0x80000000);
777} 684}
778 685
779static void 686static void
780nv50_display_unk20_handler(struct drm_device *dev) 687nv50_display_unk20_handler(struct drm_device *dev)
781{ 688{
782 struct drm_nouveau_private *dev_priv = dev->dev_private; 689 struct nouveau_device *device = nouveau_dev(dev);
690 struct nouveau_drm *drm = nouveau_drm(dev);
783 struct nv50_display *disp = nv50_display(dev); 691 struct nv50_display *disp = nv50_display(dev);
784 u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0; 692 u32 unk30 = nv_rd32(device, 0x610030), tmp, pclk, script, mc = 0;
785 struct dcb_entry *dcb; 693 struct dcb_output *dcb;
786 int i, crtc, or = 0, type = OUTPUT_ANY; 694 int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
787 695
788 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); 696 NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
789 dcb = disp->irq.dcb; 697 dcb = disp->irq.dcb;
790 if (dcb) { 698 if (dcb) {
791 nouveau_bios_run_display_table(dev, 0, -2, dcb, -1); 699 nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
@@ -795,86 +703,86 @@ nv50_display_unk20_handler(struct drm_device *dev)
795 /* CRTC clock change requested? */ 703 /* CRTC clock change requested? */
796 crtc = ffs((unk30 & 0x00000600) >> 9) - 1; 704 crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
797 if (crtc >= 0) { 705 if (crtc >= 0) {
798 pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)); 706 pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
799 pclk &= 0x003fffff; 707 pclk &= 0x003fffff;
800 if (pclk) 708 if (pclk)
801 nv50_crtc_set_clock(dev, crtc, pclk); 709 nv50_crtc_set_clock(dev, crtc, pclk);
802 710
803 tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc)); 711 tmp = nv_rd32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
804 tmp &= ~0x000000f; 712 tmp &= ~0x000000f;
805 nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp); 713 nv_wr32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
806 } 714 }
807 715
808 /* Nothing needs to be done for the encoder */ 716 /* Nothing needs to be done for the encoder */
809 crtc = ffs((unk30 & 0x00000180) >> 7) - 1; 717 crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
810 if (crtc < 0) 718 if (crtc < 0)
811 goto ack; 719 goto ack;
812 pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff; 720 pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
813 721
814 /* Find which encoder is connected to the CRTC */ 722 /* Find which encoder is connected to the CRTC */
815 for (i = 0; type == OUTPUT_ANY && i < 3; i++) { 723 for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
816 mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(i)); 724 mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
817 NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc); 725 NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
818 if (!(mc & (1 << crtc))) 726 if (!(mc & (1 << crtc)))
819 continue; 727 continue;
820 728
821 switch ((mc & 0x00000f00) >> 8) { 729 switch ((mc & 0x00000f00) >> 8) {
822 case 0: type = OUTPUT_ANALOG; break; 730 case 0: type = DCB_OUTPUT_ANALOG; break;
823 case 1: type = OUTPUT_TV; break; 731 case 1: type = DCB_OUTPUT_TV; break;
824 default: 732 default:
825 NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc); 733 NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
826 goto ack; 734 goto ack;
827 } 735 }
828 736
829 or = i; 737 or = i;
830 } 738 }
831 739
832 for (i = 0; type == OUTPUT_ANY && i < nv50_sor_nr(dev); i++) { 740 for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
833 if (dev_priv->chipset < 0x90 || 741 if (nv_device(drm->device)->chipset < 0x90 ||
834 dev_priv->chipset == 0x92 || 742 nv_device(drm->device)->chipset == 0x92 ||
835 dev_priv->chipset == 0xa0) 743 nv_device(drm->device)->chipset == 0xa0)
836 mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(i)); 744 mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
837 else 745 else
838 mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(i)); 746 mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
839 747
840 NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc); 748 NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
841 if (!(mc & (1 << crtc))) 749 if (!(mc & (1 << crtc)))
842 continue; 750 continue;
843 751
844 switch ((mc & 0x00000f00) >> 8) { 752 switch ((mc & 0x00000f00) >> 8) {
845 case 0: type = OUTPUT_LVDS; break; 753 case 0: type = DCB_OUTPUT_LVDS; break;
846 case 1: type = OUTPUT_TMDS; break; 754 case 1: type = DCB_OUTPUT_TMDS; break;
847 case 2: type = OUTPUT_TMDS; break; 755 case 2: type = DCB_OUTPUT_TMDS; break;
848 case 5: type = OUTPUT_TMDS; break; 756 case 5: type = DCB_OUTPUT_TMDS; break;
849 case 8: type = OUTPUT_DP; break; 757 case 8: type = DCB_OUTPUT_DP; break;
850 case 9: type = OUTPUT_DP; break; 758 case 9: type = DCB_OUTPUT_DP; break;
851 default: 759 default:
852 NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc); 760 NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
853 goto ack; 761 goto ack;
854 } 762 }
855 763
856 or = i; 764 or = i;
857 } 765 }
858 766
859 if (type == OUTPUT_ANY) 767 if (type == DCB_OUTPUT_ANY)
860 goto ack; 768 goto ack;
861 769
862 /* Enable the encoder */ 770 /* Enable the encoder */
863 for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { 771 for (i = 0; i < drm->vbios.dcb.entries; i++) {
864 dcb = &dev_priv->vbios.dcb.entry[i]; 772 dcb = &drm->vbios.dcb.entry[i];
865 if (dcb->type == type && (dcb->or & (1 << or))) 773 if (dcb->type == type && (dcb->or & (1 << or)))
866 break; 774 break;
867 } 775 }
868 776
869 if (i == dev_priv->vbios.dcb.entries) { 777 if (i == drm->vbios.dcb.entries) {
870 NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc); 778 NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
871 goto ack; 779 goto ack;
872 } 780 }
873 781
874 script = nv50_display_script_select(dev, dcb, mc, pclk); 782 script = nv50_display_script_select(dev, dcb, mc, pclk);
875 nouveau_bios_run_display_table(dev, script, pclk, dcb, -1); 783 nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
876 784
877 if (type == OUTPUT_DP) { 785 if (type == DCB_OUTPUT_DP) {
878 int link = !(dcb->dpconf.sor.link & 1); 786 int link = !(dcb->dpconf.sor.link & 1);
879 if ((mc & 0x000f0000) == 0x00020000) 787 if ((mc & 0x000f0000) == 0x00020000)
880 nv50_sor_dp_calc_tu(dev, or, link, pclk, 18); 788 nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
@@ -882,14 +790,14 @@ nv50_display_unk20_handler(struct drm_device *dev)
882 nv50_sor_dp_calc_tu(dev, or, link, pclk, 24); 790 nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
883 } 791 }
884 792
885 if (dcb->type != OUTPUT_ANALOG) { 793 if (dcb->type != DCB_OUTPUT_ANALOG) {
886 tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or)); 794 tmp = nv_rd32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
887 tmp &= ~0x00000f0f; 795 tmp &= ~0x00000f0f;
888 if (script & 0x0100) 796 if (script & 0x0100)
889 tmp |= 0x00000101; 797 tmp |= 0x00000101;
890 nv_wr32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp); 798 nv_wr32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
891 } else { 799 } else {
892 nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0); 800 nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
893 } 801 }
894 802
895 disp->irq.dcb = dcb; 803 disp->irq.dcb = dcb;
@@ -897,8 +805,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
897 disp->irq.script = script; 805 disp->irq.script = script;
898 806
899ack: 807ack:
900 nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20); 808 nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
901 nv_wr32(dev, 0x610030, 0x80000000); 809 nv_wr32(device, 0x610030, 0x80000000);
902} 810}
903 811
904/* If programming a TMDS output on a SOR that can also be configured for 812/* If programming a TMDS output on a SOR that can also be configured for
@@ -910,23 +818,24 @@ ack:
910 * programmed for DisplayPort. 818 * programmed for DisplayPort.
911 */ 819 */
912static void 820static void
913nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb) 821nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_output *dcb)
914{ 822{
823 struct nouveau_device *device = nouveau_dev(dev);
915 int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1); 824 int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
916 struct drm_encoder *encoder; 825 struct drm_encoder *encoder;
917 u32 tmp; 826 u32 tmp;
918 827
919 if (dcb->type != OUTPUT_TMDS) 828 if (dcb->type != DCB_OUTPUT_TMDS)
920 return; 829 return;
921 830
922 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 831 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
923 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 832 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
924 833
925 if (nv_encoder->dcb->type == OUTPUT_DP && 834 if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
926 nv_encoder->dcb->or & (1 << or)) { 835 nv_encoder->dcb->or & (1 << or)) {
927 tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); 836 tmp = nv_rd32(device, NV50_SOR_DP_CTRL(or, link));
928 tmp &= ~NV50_SOR_DP_CTRL_ENABLED; 837 tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
929 nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp); 838 nv_wr32(device, NV50_SOR_DP_CTRL(or, link), tmp);
930 break; 839 break;
931 } 840 }
932 } 841 }
@@ -935,12 +844,14 @@ nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb)
935static void 844static void
936nv50_display_unk40_handler(struct drm_device *dev) 845nv50_display_unk40_handler(struct drm_device *dev)
937{ 846{
847 struct nouveau_device *device = nouveau_dev(dev);
848 struct nouveau_drm *drm = nouveau_drm(dev);
938 struct nv50_display *disp = nv50_display(dev); 849 struct nv50_display *disp = nv50_display(dev);
939 struct dcb_entry *dcb = disp->irq.dcb; 850 struct dcb_output *dcb = disp->irq.dcb;
940 u16 script = disp->irq.script; 851 u16 script = disp->irq.script;
941 u32 unk30 = nv_rd32(dev, 0x610030), pclk = disp->irq.pclk; 852 u32 unk30 = nv_rd32(device, 0x610030), pclk = disp->irq.pclk;
942 853
943 NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); 854 NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
944 disp->irq.dcb = NULL; 855 disp->irq.dcb = NULL;
945 if (!dcb) 856 if (!dcb)
946 goto ack; 857 goto ack;
@@ -949,21 +860,23 @@ nv50_display_unk40_handler(struct drm_device *dev)
949 nv50_display_unk40_dp_set_tmds(dev, dcb); 860 nv50_display_unk40_dp_set_tmds(dev, dcb);
950 861
951ack: 862ack:
952 nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40); 863 nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
953 nv_wr32(dev, 0x610030, 0x80000000); 864 nv_wr32(device, 0x610030, 0x80000000);
954 nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8); 865 nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) | 8);
955} 866}
956 867
957static void 868static void
958nv50_display_bh(unsigned long data) 869nv50_display_bh(unsigned long data)
959{ 870{
960 struct drm_device *dev = (struct drm_device *)data; 871 struct drm_device *dev = (struct drm_device *)data;
872 struct nouveau_device *device = nouveau_dev(dev);
873 struct nouveau_drm *drm = nouveau_drm(dev);
961 874
962 for (;;) { 875 for (;;) {
963 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); 876 uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
964 uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); 877 uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
965 878
966 NV_DEBUG_KMS(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1); 879 NV_DEBUG(drm, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
967 880
968 if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10) 881 if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
969 nv50_display_unk10_handler(dev); 882 nv50_display_unk10_handler(dev);
@@ -977,13 +890,15 @@ nv50_display_bh(unsigned long data)
977 break; 890 break;
978 } 891 }
979 892
980 nv_wr32(dev, NV03_PMC_INTR_EN_0, 1); 893 nv_wr32(device, NV03_PMC_INTR_EN_0, 1);
981} 894}
982 895
983static void 896static void
984nv50_display_error_handler(struct drm_device *dev) 897nv50_display_error_handler(struct drm_device *dev)
985{ 898{
986 u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16; 899 struct nouveau_device *device = nouveau_dev(dev);
900 struct nouveau_drm *drm = nouveau_drm(dev);
901 u32 channels = (nv_rd32(device, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
987 u32 addr, data; 902 u32 addr, data;
988 int chid; 903 int chid;
989 904
@@ -991,29 +906,31 @@ nv50_display_error_handler(struct drm_device *dev)
991 if (!(channels & (1 << chid))) 906 if (!(channels & (1 << chid)))
992 continue; 907 continue;
993 908
994 nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid); 909 nv_wr32(device, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
995 addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid)); 910 addr = nv_rd32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid));
996 data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid)); 911 data = nv_rd32(device, NV50_PDISPLAY_TRAPPED_DATA(chid));
997 NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x " 912 NV_ERROR(drm, "EvoCh %d Mthd 0x%04x Data 0x%08x "
998 "(0x%04x 0x%02x)\n", chid, 913 "(0x%04x 0x%02x)\n", chid,
999 addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf); 914 addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
1000 915
1001 nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000); 916 nv_wr32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
1002 } 917 }
1003} 918}
1004 919
1005static void 920void
1006nv50_display_isr(struct drm_device *dev) 921nv50_display_intr(struct drm_device *dev)
1007{ 922{
923 struct nouveau_device *device = nouveau_dev(dev);
924 struct nouveau_drm *drm = nouveau_drm(dev);
1008 struct nv50_display *disp = nv50_display(dev); 925 struct nv50_display *disp = nv50_display(dev);
1009 uint32_t delayed = 0; 926 uint32_t delayed = 0;
1010 927
1011 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { 928 while (nv_rd32(device, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
1012 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); 929 uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
1013 uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); 930 uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
1014 uint32_t clock; 931 uint32_t clock;
1015 932
1016 NV_DEBUG_KMS(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1); 933 NV_DEBUG(drm, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
1017 934
1018 if (!intr0 && !(intr1 & ~delayed)) 935 if (!intr0 && !(intr1 & ~delayed))
1019 break; 936 break;
@@ -1024,29 +941,29 @@ nv50_display_isr(struct drm_device *dev)
1024 } 941 }
1025 942
1026 if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) { 943 if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
1027 nv50_display_vblank_handler(dev, intr1);
1028 intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC; 944 intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
945 delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
1029 } 946 }
1030 947
1031 clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 | 948 clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
1032 NV50_PDISPLAY_INTR_1_CLK_UNK20 | 949 NV50_PDISPLAY_INTR_1_CLK_UNK20 |
1033 NV50_PDISPLAY_INTR_1_CLK_UNK40)); 950 NV50_PDISPLAY_INTR_1_CLK_UNK40));
1034 if (clock) { 951 if (clock) {
1035 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); 952 nv_wr32(device, NV03_PMC_INTR_EN_0, 0);
1036 tasklet_schedule(&disp->tasklet); 953 tasklet_schedule(&disp->tasklet);
1037 delayed |= clock; 954 delayed |= clock;
1038 intr1 &= ~clock; 955 intr1 &= ~clock;
1039 } 956 }
1040 957
1041 if (intr0) { 958 if (intr0) {
1042 NV_ERROR(dev, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0); 959 NV_ERROR(drm, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
1043 nv_wr32(dev, NV50_PDISPLAY_INTR_0, intr0); 960 nv_wr32(device, NV50_PDISPLAY_INTR_0, intr0);
1044 } 961 }
1045 962
1046 if (intr1) { 963 if (intr1) {
1047 NV_ERROR(dev, 964 NV_ERROR(drm,
1048 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1); 965 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
1049 nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr1); 966 nv_wr32(device, NV50_PDISPLAY_INTR_1, intr1);
1050 } 967 }
1051 } 968 }
1052} 969}
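
Both the unk10 and unk20 handlers above recover the active output by reading each DAC/SOR mode-control register and decoding bits 8..11 into a DCB output type; the dev_priv to nouveau_drm/nouveau_device conversion leaves that logic untouched. A minimal user-space sketch of the SOR decode, assuming only the bit layout visible in the switch statements above (the enum values and main() are illustrative scaffolding, not driver code):

#include <stdio.h>

enum dcb_type { DCB_ANY = -1, DCB_ANALOG, DCB_TV, DCB_LVDS, DCB_TMDS, DCB_DP };

static enum dcb_type
sor_mc_decode(unsigned int mc, int crtc)
{
        if (!(mc & (1u << crtc)))
                return DCB_ANY;                 /* SOR not driving this CRTC */

        switch ((mc & 0x00000f00) >> 8) {
        case 0:                 return DCB_LVDS;
        case 1: case 2: case 5: return DCB_TMDS;
        case 8: case 9:         return DCB_DP;
        default:                return DCB_ANY; /* handler would goto ack */
        }
}

int
main(void)
{
        printf("type %d\n", sor_mc_decode(0x00000101, 0));      /* -> TMDS */
        return 0;
}
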
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 009ec2a811c4..973554d8a7a6 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -27,12 +27,9 @@
27#ifndef __NV50_DISPLAY_H__ 27#ifndef __NV50_DISPLAY_H__
28#define __NV50_DISPLAY_H__ 28#define __NV50_DISPLAY_H__
29 29
30#include <drm/drmP.h> 30#include "nouveau_display.h"
31#include "nouveau_drv.h"
32#include "nouveau_dma.h"
33#include "nouveau_reg.h"
34#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
35#include "nouveau_software.h" 32#include "nouveau_reg.h"
36#include "nv50_evo.h" 33#include "nv50_evo.h"
37 34
38struct nv50_display_crtc { 35struct nv50_display_crtc {
@@ -46,13 +43,16 @@ struct nv50_display_crtc {
46 43
47struct nv50_display { 44struct nv50_display {
48 struct nouveau_channel *master; 45 struct nouveau_channel *master;
49 struct nouveau_gpuobj *ntfy; 46
47 struct nouveau_gpuobj *ramin;
48 u32 dmao;
49 u32 hash;
50 50
51 struct nv50_display_crtc crtc[2]; 51 struct nv50_display_crtc crtc[2];
52 52
53 struct tasklet_struct tasklet; 53 struct tasklet_struct tasklet;
54 struct { 54 struct {
55 struct dcb_entry *dcb; 55 struct dcb_output *dcb;
56 u16 script; 56 u16 script;
57 u32 pclk; 57 u32 pclk;
58 } irq; 58 } irq;
@@ -61,8 +61,7 @@ struct nv50_display {
61static inline struct nv50_display * 61static inline struct nv50_display *
62nv50_display(struct drm_device *dev) 62nv50_display(struct drm_device *dev)
63{ 63{
64 struct drm_nouveau_private *dev_priv = dev->dev_private; 64 return nouveau_display(dev)->priv;
65 return dev_priv->engine.display.priv;
66} 65}
67 66
68int nv50_display_early_init(struct drm_device *dev); 67int nv50_display_early_init(struct drm_device *dev);
@@ -71,6 +70,7 @@ int nv50_display_create(struct drm_device *dev);
71int nv50_display_init(struct drm_device *dev); 70int nv50_display_init(struct drm_device *dev);
72void nv50_display_fini(struct drm_device *dev); 71void nv50_display_fini(struct drm_device *dev);
73void nv50_display_destroy(struct drm_device *dev); 72void nv50_display_destroy(struct drm_device *dev);
73void nv50_display_intr(struct drm_device *);
74int nv50_crtc_blank(struct nouveau_crtc *, bool blank); 74int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
75int nv50_crtc_set_clock(struct drm_device *, int head, int pclk); 75int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
76 76
@@ -90,4 +90,17 @@ void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
90int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype, 90int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
91 u64 base, u64 size, struct nouveau_gpuobj **); 91 u64 base, u64 size, struct nouveau_gpuobj **);
92 92
93int nvd0_display_create(struct drm_device *);
94void nvd0_display_destroy(struct drm_device *);
95int nvd0_display_init(struct drm_device *);
96void nvd0_display_fini(struct drm_device *);
97void nvd0_display_intr(struct drm_device *);
98
99void nvd0_display_flip_stop(struct drm_crtc *);
100int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
101 struct nouveau_channel *, u32 swap_interval);
102
103struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
104struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int head);
105
93#endif /* __NV50_DISPLAY_H__ */ 106#endif /* __NV50_DISPLAY_H__ */
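
The header now tracks the raw instance-memory object (ramin) plus two write cursors, dmao and hash, in place of the old ntfy object: EVO DMA objects are carved out of a single allocation by simple bump allocation. A sketch of that arithmetic, assuming the 0x20/0x08 strides used by nv50_evo_dmaobj_new() in the next file (the struct is a stand-in, not the kernel one):

/* stand-in for the two cursor fields added to struct nv50_display */
struct disp_cursors {
        unsigned int dmao;      /* next free DMA-object offset, starts 0x1000 */
        unsigned int hash;      /* next free hash slot, starts 0x0000 */
};

static void
dmaobj_reserve(struct disp_cursors *d, unsigned int *obj, unsigned int *slot)
{
        *obj  = d->dmao; d->dmao += 0x20;       /* six words, 0x20 stride */
        *slot = d->hash; d->hash += 0x08;       /* handle + context words */
}
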
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index dabcd8787176..9f6f55cdfa77 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -24,11 +24,29 @@
24 24
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26 26
27#include "nouveau_drv.h" 27#include "nouveau_drm.h"
28#include "nouveau_dma.h" 28#include "nouveau_dma.h"
29#include "nouveau_ramht.h"
30#include "nv50_display.h" 29#include "nv50_display.h"
31 30
31#include <core/gpuobj.h>
32
33#include <subdev/timer.h>
34#include <subdev/fb.h>
35
36static u32
37nv50_evo_rd32(struct nouveau_object *object, u32 addr)
38{
39 void __iomem *iomem = object->oclass->ofuncs->rd08;
40 return ioread32_native(iomem + addr);
41}
42
43static void
44nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
45{
46 void __iomem *iomem = object->oclass->ofuncs->rd08;
47 iowrite32_native(data, iomem + addr);
48}
49
32static void 50static void
33nv50_evo_channel_del(struct nouveau_channel **pevo) 51nv50_evo_channel_del(struct nouveau_channel **pevo)
34{ 52{
@@ -38,26 +56,29 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
38 return; 56 return;
39 *pevo = NULL; 57 *pevo = NULL;
40 58
41 nouveau_ramht_ref(NULL, &evo->ramht, evo); 59 nouveau_bo_unmap(evo->push.buffer);
42 nouveau_gpuobj_channel_takedown(evo); 60 nouveau_bo_ref(NULL, &evo->push.buffer);
43 nouveau_bo_unmap(evo->pushbuf_bo);
44 nouveau_bo_ref(NULL, &evo->pushbuf_bo);
45 61
46 if (evo->user) 62 if (evo->object)
47 iounmap(evo->user); 63 iounmap(evo->object->oclass->ofuncs);
48 64
49 kfree(evo); 65 kfree(evo);
50} 66}
51 67
52void 68int
53nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size) 69nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
70 u64 base, u64 size, struct nouveau_gpuobj **pobj)
54{ 71{
55 struct drm_nouveau_private *dev_priv = obj->dev->dev_private; 72 struct drm_device *dev = evo->fence;
73 struct nouveau_drm *drm = nouveau_drm(dev);
74 struct nv50_display *disp = nv50_display(dev);
75 u32 dmao = disp->dmao;
76 u32 hash = disp->hash;
56 u32 flags5; 77 u32 flags5;
57 78
58 if (dev_priv->chipset < 0xc0) { 79 if (nv_device(drm->device)->chipset < 0xc0) {
59 /* not supported on 0x50, specified in format mthd */ 80 /* not supported on 0x50, specified in format mthd */
60 if (dev_priv->chipset == 0x50) 81 if (nv_device(drm->device)->chipset == 0x50)
61 memtype = 0; 82 memtype = 0;
62 flags5 = 0x00010000; 83 flags5 = 0x00010000;
63 } else { 84 } else {
@@ -67,42 +88,28 @@ nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size
67 flags5 = 0x00020000; 88 flags5 = 0x00020000;
68 } 89 }
69 90
70 nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM, 91 nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
71 NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0); 92 nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
72 nv_wo32(obj, 0x14, flags5); 93 nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
73 dev_priv->engine.instmem.flush(obj->dev); 94 nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
74} 95 upper_32_bits(base));
96 nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
97 nv_wo32(disp->ramin, dmao + 0x14, flags5);
75 98
76int 99 nv_wo32(disp->ramin, hash + 0x00, handle);
77nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype, 100 nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
78 u64 base, u64 size, struct nouveau_gpuobj **pobj) 101 evo->handle);
79{
80 struct nv50_display *disp = nv50_display(evo->dev);
81 struct nouveau_gpuobj *obj = NULL;
82 int ret;
83
84 ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj);
85 if (ret)
86 return ret;
87 obj->engine = NVOBJ_ENGINE_DISPLAY;
88
89 nv50_evo_dmaobj_init(obj, memtype, base, size);
90
91 ret = nouveau_ramht_insert(evo, handle, obj);
92 if (ret)
93 goto out;
94 102
95 if (pobj) 103 disp->dmao += 0x20;
96 nouveau_gpuobj_ref(obj, pobj); 104 disp->hash += 0x08;
97out: 105 return 0;
98 nouveau_gpuobj_ref(NULL, &obj);
99 return ret;
100} 106}
101 107
102static int 108static int
103nv50_evo_channel_new(struct drm_device *dev, int chid, 109nv50_evo_channel_new(struct drm_device *dev, int chid,
104 struct nouveau_channel **pevo) 110 struct nouveau_channel **pevo)
105{ 111{
112 struct nouveau_drm *drm = nouveau_drm(dev);
106 struct nv50_display *disp = nv50_display(dev); 113 struct nv50_display *disp = nv50_display(dev);
107 struct nouveau_channel *evo; 114 struct nouveau_channel *evo;
108 int ret; 115 int ret;
@@ -112,79 +119,84 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
112 return -ENOMEM; 119 return -ENOMEM;
113 *pevo = evo; 120 *pevo = evo;
114 121
115 evo->id = chid; 122 evo->drm = drm;
116 evo->dev = dev; 123 evo->handle = chid;
124 evo->fence = dev;
117 evo->user_get = 4; 125 evo->user_get = 4;
118 evo->user_put = 0; 126 evo->user_put = 0;
119 127
120 ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL, 128 ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
121 &evo->pushbuf_bo); 129 &evo->push.buffer);
122 if (ret == 0) 130 if (ret == 0)
123 ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); 131 ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
124 if (ret) { 132 if (ret) {
125 NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); 133 NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret);
126 nv50_evo_channel_del(pevo); 134 nv50_evo_channel_del(pevo);
127 return ret; 135 return ret;
128 } 136 }
129 137
130 ret = nouveau_bo_map(evo->pushbuf_bo); 138 ret = nouveau_bo_map(evo->push.buffer);
131 if (ret) { 139 if (ret) {
132 NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); 140 NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret);
133 nv50_evo_channel_del(pevo); 141 nv50_evo_channel_del(pevo);
134 return ret; 142 return ret;
135 } 143 }
136 144
137 evo->user = ioremap(pci_resource_start(dev->pdev, 0) + 145 evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
138 NV50_PDISPLAY_USER(evo->id), PAGE_SIZE); 146#ifdef NOUVEAU_OBJECT_MAGIC
139 if (!evo->user) { 147 evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
140 NV_ERROR(dev, "Error mapping EVO control regs.\n"); 148#endif
141 nv50_evo_channel_del(pevo); 149 evo->object->parent = nv_object(disp->ramin)->parent;
142 return -ENOMEM; 150 evo->object->engine = nv_object(disp->ramin)->engine;
143 } 151 evo->object->oclass =
144 152 kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
145 /* bind primary evo channel's ramht to the channel */ 153 evo->object->oclass->ofuncs =
146 if (disp->master && evo != disp->master) 154 kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
147 nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL); 155 evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
148 156 evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
157 evo->object->oclass->ofuncs->rd08 =
158 ioremap(pci_resource_start(dev->pdev, 0) +
159 NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
149 return 0; 160 return 0;
150} 161}
151 162
152static int 163static int
153nv50_evo_channel_init(struct nouveau_channel *evo) 164nv50_evo_channel_init(struct nouveau_channel *evo)
154{ 165{
155 struct drm_device *dev = evo->dev; 166 struct nouveau_drm *drm = evo->drm;
156 int id = evo->id, ret, i; 167 struct nouveau_device *device = nv_device(drm->device);
157 u64 pushbuf = evo->pushbuf_bo->bo.offset; 168 int id = evo->handle, ret, i;
169 u64 pushbuf = evo->push.buffer->bo.offset;
158 u32 tmp; 170 u32 tmp;
159 171
160 tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); 172 tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
161 if ((tmp & 0x009f0000) == 0x00020000) 173 if ((tmp & 0x009f0000) == 0x00020000)
162 nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000); 174 nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
163 175
164 tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); 176 tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
165 if ((tmp & 0x003f0000) == 0x00030000) 177 if ((tmp & 0x003f0000) == 0x00030000)
166 nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000); 178 nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
167 179
168 /* initialise fifo */ 180 /* initialise fifo */
169 nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 | 181 nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
170 NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM | 182 NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
171 NV50_PDISPLAY_EVO_DMA_CB_VALID); 183 NV50_PDISPLAY_EVO_DMA_CB_VALID);
172 nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000); 184 nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
173 nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id); 185 nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
174 nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA, 186 nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
175 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); 187 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
176 188
177 nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000); 189 nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
178 nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 | 190 nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
179 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); 191 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
180 if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) { 192 if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
181 NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id, 193 NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id,
182 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); 194 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
183 return -EBUSY; 195 return -EBUSY;
184 } 196 }
185 197
186 /* enable error reporting on the channel */ 198 /* enable error reporting on the channel */
187 nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); 199 nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id);
188 200
189 evo->dma.max = (4096/4) - 2; 201 evo->dma.max = (4096/4) - 2;
190 evo->dma.max &= ~7; 202 evo->dma.max &= ~7;
@@ -205,16 +217,17 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
205static void 217static void
206nv50_evo_channel_fini(struct nouveau_channel *evo) 218nv50_evo_channel_fini(struct nouveau_channel *evo)
207{ 219{
208 struct drm_device *dev = evo->dev; 220 struct nouveau_drm *drm = evo->drm;
209 int id = evo->id; 221 struct nouveau_device *device = nv_device(drm->device);
210 222 int id = evo->handle;
211 nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000); 223
212 nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000); 224 nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000);
213 nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id)); 225 nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
214 nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000); 226 nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << id));
215 if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) { 227 nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
216 NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id, 228 if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
217 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); 229 NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id,
230 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
218 } 231 }
219} 232}
220 233
@@ -231,93 +244,66 @@ nv50_evo_destroy(struct drm_device *dev)
231 } 244 }
232 nv50_evo_channel_del(&disp->crtc[i].sync); 245 nv50_evo_channel_del(&disp->crtc[i].sync);
233 } 246 }
234 nouveau_gpuobj_ref(NULL, &disp->ntfy);
235 nv50_evo_channel_del(&disp->master); 247 nv50_evo_channel_del(&disp->master);
248 nouveau_gpuobj_ref(NULL, &disp->ramin);
236} 249}
237 250
238int 251int
239nv50_evo_create(struct drm_device *dev) 252nv50_evo_create(struct drm_device *dev)
240{ 253{
241 struct drm_nouveau_private *dev_priv = dev->dev_private; 254 struct nouveau_drm *drm = nouveau_drm(dev);
255 struct nouveau_fb *pfb = nouveau_fb(drm->device);
242 struct nv50_display *disp = nv50_display(dev); 256 struct nv50_display *disp = nv50_display(dev);
243 struct nouveau_gpuobj *ramht = NULL;
244 struct nouveau_channel *evo; 257 struct nouveau_channel *evo;
245 int ret, i, j; 258 int ret, i, j;
246 259
247 /* create primary evo channel, the one we use for modesetting
248 * purposes
249 */
250 ret = nv50_evo_channel_new(dev, 0, &disp->master);
251 if (ret)
252 return ret;
253 evo = disp->master;
254
255 /* setup object management on it, any other evo channel will 260 /* setup object management on it, any other evo channel will
256 * use this also as there's no per-channel support on the 261 * use this also as there's no per-channel support on the
257 * hardware 262 * hardware
258 */ 263 */
259 ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536, 264 ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536,
260 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin); 265 NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
261 if (ret) {
262 NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
263 goto err;
264 }
265
266 ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
267 if (ret) { 266 if (ret) {
268 NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); 267 NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret);
269 goto err; 268 goto err;
270 } 269 }
271 270
272 ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht); 271 disp->hash = 0x0000;
273 if (ret) { 272 disp->dmao = 0x1000;
274 NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
275 goto err;
276 }
277
278 ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
279 nouveau_gpuobj_ref(NULL, &ramht);
280 if (ret)
281 goto err;
282 273
283 /* not sure exactly what this is.. 274 /* create primary evo channel, the one we use for modesetting
284 *                                                              275 * purposes
285 * the first dword of the structure is used by nvidia to wait on
286 * full completion of an EVO "update" command.
287 *
288 * method 0x8c on the master evo channel will fill a lot more of
289 * this structure with some undefined info
290 */ 276 */
291 ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0, 277 ret = nv50_evo_channel_new(dev, 0, &disp->master);
292 NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy);
293 if (ret) 278 if (ret)
294 goto err; 279 return ret;
280 evo = disp->master;
295 281
296 ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000, 282 ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
297 disp->ntfy->vinst, disp->ntfy->size, NULL); 283 disp->ramin->addr + 0x2000, 0x1000, NULL);
298 if (ret) 284 if (ret)
299 goto err; 285 goto err;
300 286
301 /* create some default objects for the scanout memtypes we support */ 287 /* create some default objects for the scanout memtypes we support */
302 ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000, 288 ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
303 0, dev_priv->vram_size, NULL); 289 0, pfb->ram.size, NULL);
304 if (ret) 290 if (ret)
305 goto err; 291 goto err;
306 292
307 ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000, 293 ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
308 0, dev_priv->vram_size, NULL); 294 0, pfb->ram.size, NULL);
309 if (ret) 295 if (ret)
310 goto err; 296 goto err;
311 297
312 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 | 298 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
313 (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00), 299 (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe),
314 0, dev_priv->vram_size, NULL); 300 0, pfb->ram.size, NULL);
315 if (ret) 301 if (ret)
316 goto err; 302 goto err;
317 303
318 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 | 304 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
319 (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00), 305 (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe),
320 0, dev_priv->vram_size, NULL); 306 0, pfb->ram.size, NULL);
321 if (ret) 307 if (ret)
322 goto err; 308 goto err;
323 309
@@ -352,21 +338,21 @@ nv50_evo_create(struct drm_device *dev)
352 goto err; 338 goto err;
353 339
354 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000, 340 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
355 0, dev_priv->vram_size, NULL); 341 0, pfb->ram.size, NULL);
356 if (ret) 342 if (ret)
357 goto err; 343 goto err;
358 344
359 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 | 345 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
360 (dev_priv->chipset < 0xc0 ? 346 (nv_device(drm->device)->chipset < 0xc0 ?
361 0x7a00 : 0xfe00), 347 0x7a : 0xfe),
362 0, dev_priv->vram_size, NULL); 348 0, pfb->ram.size, NULL);
363 if (ret) 349 if (ret)
364 goto err; 350 goto err;
365 351
366 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 | 352 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
367 (dev_priv->chipset < 0xc0 ? 353 (nv_device(drm->device)->chipset < 0xc0 ?
368 0x7000 : 0xfe00), 354 0x70 : 0xfe),
369 0, dev_priv->vram_size, NULL); 355 0, pfb->ram.size, NULL);
370 if (ret) 356 if (ret)
371 goto err; 357 goto err;
372 358
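
The rewritten nv50_evo_dmaobj_new() above encodes the six-word DMA object by hand instead of going through the old nouveau_gpuobj/RAMHT helpers. A standalone sketch of that word packing, assuming the flags5 value for pre-0xc0 chipsets; the buffer and the printing are illustrative:

#include <stdint.h>
#include <stdio.h>

static void
pack_dmaobj(uint32_t w[6], uint32_t memtype, uint64_t base, uint64_t size)
{
        uint64_t limit = base + size - 1;

        w[0] = 0x0019003d | (memtype << 22);    /* class 0x3d + memtype */
        w[1] = (uint32_t)limit;
        w[2] = (uint32_t)base;
        w[3] = ((uint32_t)(limit >> 32) << 24) | (uint32_t)(base >> 32);
        w[4] = 0x00000000;
        w[5] = 0x00010000;                      /* flags5, chipset < 0xc0 */
}

int
main(void)
{
        uint32_t w[6];
        int i;

        pack_dmaobj(w, 0x0000, 0, 0x1000);      /* small VRAM window */
        for (i = 0; i < 6; i++)
                printf("0x%08x\n", w[i]);
        return 0;
}
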
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
deleted file mode 100644
index befd5fb7155f..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ /dev/null
@@ -1,295 +0,0 @@
1#include <drm/drmP.h>
2#include "nouveau_drv.h"
3#include <drm/nouveau_drm.h>
4#include "nouveau_fifo.h"
5
6struct nv50_fb_priv {
7 struct page *r100c08_page;
8 dma_addr_t r100c08;
9};
10
11static void
12nv50_fb_destroy(struct drm_device *dev)
13{
14 struct drm_nouveau_private *dev_priv = dev->dev_private;
15 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
16 struct nv50_fb_priv *priv = pfb->priv;
17
18 if (drm_mm_initialized(&pfb->tag_heap))
19 drm_mm_takedown(&pfb->tag_heap);
20
21 if (priv->r100c08_page) {
22 pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
23 PCI_DMA_BIDIRECTIONAL);
24 __free_page(priv->r100c08_page);
25 }
26
27 kfree(priv);
28 pfb->priv = NULL;
29}
30
31static int
32nv50_fb_create(struct drm_device *dev)
33{
34 struct drm_nouveau_private *dev_priv = dev->dev_private;
35 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
36 struct nv50_fb_priv *priv;
37 u32 tagmem;
38 int ret;
39
40 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
41 if (!priv)
42 return -ENOMEM;
43 pfb->priv = priv;
44
45 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
46 if (!priv->r100c08_page) {
47 nv50_fb_destroy(dev);
48 return -ENOMEM;
49 }
50
51 priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
52 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
53 if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
54 nv50_fb_destroy(dev);
55 return -EFAULT;
56 }
57
58 tagmem = nv_rd32(dev, 0x100320);
59 NV_DEBUG(dev, "%d tags available\n", tagmem);
60 ret = drm_mm_init(&pfb->tag_heap, 0, tagmem);
61 if (ret) {
62 nv50_fb_destroy(dev);
63 return ret;
64 }
65
66 return 0;
67}
68
69int
70nv50_fb_init(struct drm_device *dev)
71{
72 struct drm_nouveau_private *dev_priv = dev->dev_private;
73 struct nv50_fb_priv *priv;
74 int ret;
75
76 if (!dev_priv->engine.fb.priv) {
77 ret = nv50_fb_create(dev);
78 if (ret)
79 return ret;
80 }
81 priv = dev_priv->engine.fb.priv;
82
83 /* Not a clue what this is exactly. Without pointing it at a
84 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
85 * cause IOMMU "read from address 0" errors (rh#561267)
86 */
87 nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);
88
89 /* This is needed to get meaningful information from 100c90
90 * on traps. No idea what these values mean exactly. */
91 switch (dev_priv->chipset) {
92 case 0x50:
93 nv_wr32(dev, 0x100c90, 0x000707ff);
94 break;
95 case 0xa3:
96 case 0xa5:
97 case 0xa8:
98 nv_wr32(dev, 0x100c90, 0x000d0fff);
99 break;
100 case 0xaf:
101 nv_wr32(dev, 0x100c90, 0x089d1fff);
102 break;
103 default:
104 nv_wr32(dev, 0x100c90, 0x001d07ff);
105 break;
106 }
107
108 return 0;
109}
110
111void
112nv50_fb_takedown(struct drm_device *dev)
113{
114 nv50_fb_destroy(dev);
115}
116
117static struct nouveau_enum vm_dispatch_subclients[] = {
118 { 0x00000000, "GRCTX", NULL },
119 { 0x00000001, "NOTIFY", NULL },
120 { 0x00000002, "QUERY", NULL },
121 { 0x00000003, "COND", NULL },
122 { 0x00000004, "M2M_IN", NULL },
123 { 0x00000005, "M2M_OUT", NULL },
124 { 0x00000006, "M2M_NOTIFY", NULL },
125 {}
126};
127
128static struct nouveau_enum vm_ccache_subclients[] = {
129 { 0x00000000, "CB", NULL },
130 { 0x00000001, "TIC", NULL },
131 { 0x00000002, "TSC", NULL },
132 {}
133};
134
135static struct nouveau_enum vm_prop_subclients[] = {
136 { 0x00000000, "RT0", NULL },
137 { 0x00000001, "RT1", NULL },
138 { 0x00000002, "RT2", NULL },
139 { 0x00000003, "RT3", NULL },
140 { 0x00000004, "RT4", NULL },
141 { 0x00000005, "RT5", NULL },
142 { 0x00000006, "RT6", NULL },
143 { 0x00000007, "RT7", NULL },
144 { 0x00000008, "ZETA", NULL },
145 { 0x00000009, "LOCAL", NULL },
146 { 0x0000000a, "GLOBAL", NULL },
147 { 0x0000000b, "STACK", NULL },
148 { 0x0000000c, "DST2D", NULL },
149 {}
150};
151
152static struct nouveau_enum vm_pfifo_subclients[] = {
153 { 0x00000000, "PUSHBUF", NULL },
154 { 0x00000001, "SEMAPHORE", NULL },
155 {}
156};
157
158static struct nouveau_enum vm_bar_subclients[] = {
159 { 0x00000000, "FB", NULL },
160 { 0x00000001, "IN", NULL },
161 {}
162};
163
164static struct nouveau_enum vm_client[] = {
165 { 0x00000000, "STRMOUT", NULL },
166 { 0x00000003, "DISPATCH", vm_dispatch_subclients },
167 { 0x00000004, "PFIFO_WRITE", NULL },
168 { 0x00000005, "CCACHE", vm_ccache_subclients },
169 { 0x00000006, "PPPP", NULL },
170 { 0x00000007, "CLIPID", NULL },
171 { 0x00000008, "PFIFO_READ", NULL },
172 { 0x00000009, "VFETCH", NULL },
173 { 0x0000000a, "TEXTURE", NULL },
174 { 0x0000000b, "PROP", vm_prop_subclients },
175 { 0x0000000c, "PVP", NULL },
176 { 0x0000000d, "PBSP", NULL },
177 { 0x0000000e, "PCRYPT", NULL },
178 { 0x0000000f, "PCOUNTER", NULL },
179 { 0x00000011, "PDAEMON", NULL },
180 {}
181};
182
183static struct nouveau_enum vm_engine[] = {
184 { 0x00000000, "PGRAPH", NULL },
185 { 0x00000001, "PVP", NULL },
186 { 0x00000004, "PEEPHOLE", NULL },
187 { 0x00000005, "PFIFO", vm_pfifo_subclients },
188 { 0x00000006, "BAR", vm_bar_subclients },
189 { 0x00000008, "PPPP", NULL },
190 { 0x00000009, "PBSP", NULL },
191 { 0x0000000a, "PCRYPT", NULL },
192 { 0x0000000b, "PCOUNTER", NULL },
193 { 0x0000000c, "SEMAPHORE_BG", NULL },
194 { 0x0000000d, "PCOPY", NULL },
195 { 0x0000000e, "PDAEMON", NULL },
196 {}
197};
198
199static struct nouveau_enum vm_fault[] = {
200 { 0x00000000, "PT_NOT_PRESENT", NULL },
201 { 0x00000001, "PT_TOO_SHORT", NULL },
202 { 0x00000002, "PAGE_NOT_PRESENT", NULL },
203 { 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
204 { 0x00000004, "PAGE_READ_ONLY", NULL },
205 { 0x00000006, "NULL_DMAOBJ", NULL },
206 { 0x00000007, "WRONG_MEMTYPE", NULL },
207 { 0x0000000b, "VRAM_LIMIT", NULL },
208 { 0x0000000f, "DMAOBJ_LIMIT", NULL },
209 {}
210};
211
212void
213nv50_fb_vm_trap(struct drm_device *dev, int display)
214{
215 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
216 struct drm_nouveau_private *dev_priv = dev->dev_private;
217 const struct nouveau_enum *en, *cl;
218 unsigned long flags;
219 u32 trap[6], idx, chinst;
220 u8 st0, st1, st2, st3;
221 int i, ch;
222
223 idx = nv_rd32(dev, 0x100c90);
224 if (!(idx & 0x80000000))
225 return;
226 idx &= 0x00ffffff;
227
228 for (i = 0; i < 6; i++) {
229 nv_wr32(dev, 0x100c90, idx | i << 24);
230 trap[i] = nv_rd32(dev, 0x100c94);
231 }
232 nv_wr32(dev, 0x100c90, idx | 0x80000000);
233
234 if (!display)
235 return;
236
237 /* lookup channel id */
238 chinst = (trap[2] << 16) | trap[1];
239 spin_lock_irqsave(&dev_priv->channels.lock, flags);
240 for (ch = 0; ch < pfifo->channels; ch++) {
241 struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
242
243 if (!chan || !chan->ramin)
244 continue;
245
246 if (chinst == chan->ramin->vinst >> 12)
247 break;
248 }
249 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
250
251 /* decode status bits into something more useful */
252 if (dev_priv->chipset < 0xa3 ||
253 dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
254 st0 = (trap[0] & 0x0000000f) >> 0;
255 st1 = (trap[0] & 0x000000f0) >> 4;
256 st2 = (trap[0] & 0x00000f00) >> 8;
257 st3 = (trap[0] & 0x0000f000) >> 12;
258 } else {
259 st0 = (trap[0] & 0x000000ff) >> 0;
260 st1 = (trap[0] & 0x0000ff00) >> 8;
261 st2 = (trap[0] & 0x00ff0000) >> 16;
262 st3 = (trap[0] & 0xff000000) >> 24;
263 }
264
265 NV_INFO(dev, "VM: trapped %s at 0x%02x%04x%04x on ch %d [0x%08x] ",
266 (trap[5] & 0x00000100) ? "read" : "write",
267 trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, ch, chinst);
268
269 en = nouveau_enum_find(vm_engine, st0);
270 if (en)
271 printk("%s/", en->name);
272 else
273 printk("%02x/", st0);
274
275 cl = nouveau_enum_find(vm_client, st2);
276 if (cl)
277 printk("%s/", cl->name);
278 else
279 printk("%02x/", st2);
280
281 if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
282 else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
283 else cl = NULL;
284 if (cl)
285 printk("%s", cl->name);
286 else
287 printk("%02x", st3);
288
289 printk(" reason: ");
290 en = nouveau_enum_find(vm_fault, st1);
291 if (en)
292 printk("%s\n", en->name);
293 else
294 printk("0x%08x\n", st1);
295}
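
The deleted nv50_fb.c (its duties move to the new core/subdev fb code) included a VM-trap decoder whose status unpack depends on chipset: four 4-bit fields packed into trap[0] on older parts, 8-bit fields otherwise. A compilable sketch of just that unpack, mirroring the removed chipset switch; everything else here is illustrative:

#include <stdint.h>

static void
unpack_trap_status(uint32_t trap0, int chipset, uint8_t st[4])
{
        /* chipset test copied from the removed code */
        int narrow = chipset < 0xa3 || chipset == 0xaa || chipset == 0xac;
        int width  = narrow ? 4 : 8;
        uint32_t mask = narrow ? 0x0f : 0xff;
        int i;

        for (i = 0; i < 4; i++)
                st[i] = (trap0 >> (i * width)) & mask;
}
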
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index ec24959e67a2..52068a0910dc 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -22,20 +22,16 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <drm/drmP.h> 25#include "nouveau_drm.h"
26#include "nouveau_drv.h"
27#include "nouveau_dma.h" 26#include "nouveau_dma.h"
28#include "nouveau_ramht.h"
29#include "nouveau_fbcon.h" 27#include "nouveau_fbcon.h"
30#include "nouveau_mm.h"
31 28
32int 29int
33nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 30nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
34{ 31{
35 struct nouveau_fbdev *nfbdev = info->par; 32 struct nouveau_fbdev *nfbdev = info->par;
36 struct drm_device *dev = nfbdev->dev; 33 struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
37 struct drm_nouveau_private *dev_priv = dev->dev_private; 34 struct nouveau_channel *chan = drm->channel;
38 struct nouveau_channel *chan = dev_priv->channel;
39 int ret; 35 int ret;
40 36
41 ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11); 37 ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
@@ -69,9 +65,8 @@ int
69nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) 65nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
70{ 66{
71 struct nouveau_fbdev *nfbdev = info->par; 67 struct nouveau_fbdev *nfbdev = info->par;
72 struct drm_device *dev = nfbdev->dev; 68 struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
73 struct drm_nouveau_private *dev_priv = dev->dev_private; 69 struct nouveau_channel *chan = drm->channel;
74 struct nouveau_channel *chan = dev_priv->channel;
75 int ret; 70 int ret;
76 71
77 ret = RING_SPACE(chan, 12); 72 ret = RING_SPACE(chan, 12);
@@ -98,9 +93,8 @@ int
98nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 93nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
99{ 94{
100 struct nouveau_fbdev *nfbdev = info->par; 95 struct nouveau_fbdev *nfbdev = info->par;
101 struct drm_device *dev = nfbdev->dev; 96 struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
102 struct drm_nouveau_private *dev_priv = dev->dev_private; 97 struct nouveau_channel *chan = drm->channel;
103 struct nouveau_channel *chan = dev_priv->channel;
104 uint32_t width, dwords, *data = (uint32_t *)image->data; 98 uint32_t width, dwords, *data = (uint32_t *)image->data;
105 uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); 99 uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
106 uint32_t *palette = info->pseudo_palette; 100 uint32_t *palette = info->pseudo_palette;
@@ -156,10 +150,11 @@ int
156nv50_fbcon_accel_init(struct fb_info *info) 150nv50_fbcon_accel_init(struct fb_info *info)
157{ 151{
158 struct nouveau_fbdev *nfbdev = info->par; 152 struct nouveau_fbdev *nfbdev = info->par;
159 struct drm_device *dev = nfbdev->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_channel *chan = dev_priv->channel;
162 struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb; 153 struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
154 struct drm_device *dev = nfbdev->dev;
155 struct nouveau_drm *drm = nouveau_drm(dev);
156 struct nouveau_channel *chan = drm->channel;
157 struct nouveau_object *object;
163 int ret, format; 158 int ret, format;
164 159
165 switch (info->var.bits_per_pixel) { 160 switch (info->var.bits_per_pixel) {
@@ -189,7 +184,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
189 return -EINVAL; 184 return -EINVAL;
190 } 185 }
191 186
192 ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d); 187 ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
188 0x502d, NULL, 0, &object);
193 if (ret) 189 if (ret)
194 return ret; 190 return ret;
195 191
@@ -202,9 +198,9 @@ nv50_fbcon_accel_init(struct fb_info *info)
202 BEGIN_NV04(chan, NvSub2D, 0x0000, 1); 198 BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
203 OUT_RING(chan, Nv2D); 199 OUT_RING(chan, Nv2D);
204 BEGIN_NV04(chan, NvSub2D, 0x0184, 3); 200 BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
205 OUT_RING(chan, chan->vram_handle); 201 OUT_RING(chan, NvDmaFB);
206 OUT_RING(chan, chan->vram_handle); 202 OUT_RING(chan, NvDmaFB);
207 OUT_RING(chan, chan->vram_handle); 203 OUT_RING(chan, NvDmaFB);
208 BEGIN_NV04(chan, NvSub2D, 0x0290, 1); 204 BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
209 OUT_RING(chan, 0); 205 OUT_RING(chan, 0);
210 BEGIN_NV04(chan, NvSub2D, 0x0888, 1); 206 BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
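
The fbcon path keeps nouveau's classic NV04-style push-buffer format: each BEGIN_NV04() above emits a header word with the method count in bits 18 and up, the subchannel in bits 13..15, and the method offset in the low bits. A sketch of that encoding, assuming NvSub2D is subchannel 3 (the helper and main() are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t
nv04_hdr(unsigned int subc, unsigned int mthd, unsigned int count)
{
        return (count << 18) | (subc << 13) | mthd;
}

int
main(void)
{
        /* BEGIN_NV04(chan, NvSub2D, 0x0184, 3), assuming NvSub2D == 3 */
        printf("0x%08x\n", nv04_hdr(3, 0x0184, 3));
        return 0;
}
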
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
new file mode 100644
index 000000000000..e0763ea88ee2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -0,0 +1,127 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <core/object.h>
26#include <core/class.h>
27
28#include "nouveau_drm.h"
29#include "nouveau_dma.h"
30#include "nouveau_fence.h"
31
32#include "nv50_display.h"
33
34struct nv50_fence_chan {
35 struct nouveau_fence_chan base;
36};
37
38struct nv50_fence_priv {
39 struct nouveau_fence_priv base;
40 struct nouveau_bo *bo;
41 spinlock_t lock;
42 u32 sequence;
43};
44
45static int
46nv50_fence_context_new(struct nouveau_channel *chan)
47{
48 struct drm_device *dev = chan->drm->dev;
49 struct nv50_fence_priv *priv = chan->drm->fence;
50 struct nv50_fence_chan *fctx;
51 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
52 struct nouveau_object *object;
53 int ret, i;
54
55 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
56 if (!fctx)
57 return -ENOMEM;
58
59 nouveau_fence_context_new(&fctx->base);
60
61 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
62 NvSema, 0x0002,
63 &(struct nv_dma_class) {
64 .flags = NV_DMA_TARGET_VRAM |
65 NV_DMA_ACCESS_RDWR,
66 .start = mem->start * PAGE_SIZE,
67 .limit = mem->size - 1,
68 }, sizeof(struct nv_dma_class),
69 &object);
70
71 /* dma objects for display sync channel semaphore blocks */
72 for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
73 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
74
75 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
76 NvEvoSema0 + i, 0x003d,
77 &(struct nv_dma_class) {
78 .flags = NV_DMA_TARGET_VRAM |
79 NV_DMA_ACCESS_RDWR,
80 .start = bo->bo.offset,
81 .limit = bo->bo.offset + 0xfff,
82 }, sizeof(struct nv_dma_class),
83 &object);
84 }
85
86 if (ret)
87 nv10_fence_context_del(chan);
88 return ret;
89}
90
91int
92nv50_fence_create(struct nouveau_drm *drm)
93{
94 struct nv50_fence_priv *priv;
95 int ret = 0;
96
97 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
98 if (!priv)
99 return -ENOMEM;
100
101 priv->base.dtor = nv10_fence_destroy;
102 priv->base.context_new = nv50_fence_context_new;
103 priv->base.context_del = nv10_fence_context_del;
104 priv->base.emit = nv10_fence_emit;
105 priv->base.read = nv10_fence_read;
106 priv->base.sync = nv17_fence_sync;
107 spin_lock_init(&priv->lock);
108
109 ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
110 0, 0x0000, NULL, &priv->bo);
111 if (!ret) {
112 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
113 if (!ret)
114 ret = nouveau_bo_map(priv->bo);
115 if (ret)
116 nouveau_bo_ref(NULL, &priv->bo);
117 }
118
119 if (ret == 0) {
120 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
121 priv->base.sync = nv17_fence_sync;
122 }
123
124 if (ret)
125 nv10_fence_destroy(drm);
126 return ret;
127}
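Each per-CRTC semaphore object created in nv50_fence_context_new() above describes an inclusive 4 KiB VRAM window: .start = bo->bo.offset and .limit = bo->bo.offset + 0xfff, so the limit names the last addressable byte rather than the byte past the end. A standalone restatement of that inclusive-limit convention (struct and helper names hypothetical, not part of the driver):

	#include <stdint.h>

	struct dma_window { uint64_t start, limit; };

	/* inclusive [start, limit] window over 'size' bytes at 'base',
	 * mirroring the nv_dma_class fields filled in above */
	static struct dma_window dma_window(uint64_t base, uint64_t size)
	{
		struct dma_window w = { .start = base,
					.limit = base + size - 1 };
		return w;
	}
	/* e.g. dma_window(bo->bo.offset, 0x1000) yields limit = offset + 0xfff,
	 * matching the NvEvoSema0 + i objects in the loop above */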
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
deleted file mode 100644
index 5a440e89e918..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ /dev/null
@@ -1,293 +0,0 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include "nouveau_drv.h"
29#include "nouveau_fifo.h"
30#include "nouveau_ramht.h"
31#include "nouveau_vm.h"
32
33struct nv50_fifo_priv {
34 struct nouveau_fifo_priv base;
35 struct nouveau_gpuobj *playlist[2];
36 int cur_playlist;
37};
38
39struct nv50_fifo_chan {
40 struct nouveau_fifo_chan base;
41};
42
43void
44nv50_fifo_playlist_update(struct drm_device *dev)
45{
46 struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
47 struct drm_nouveau_private *dev_priv = dev->dev_private;
48 struct nouveau_gpuobj *cur;
49 int i, p;
50
51 cur = priv->playlist[priv->cur_playlist];
52 priv->cur_playlist = !priv->cur_playlist;
53
54 for (i = 0, p = 0; i < priv->base.channels; i++) {
55 if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
56 nv_wo32(cur, p++ * 4, i);
57 }
58
59 dev_priv->engine.instmem.flush(dev);
60
61 nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
62 nv_wr32(dev, 0x0032ec, p);
63 nv_wr32(dev, 0x002500, 0x00000101);
64}
65
66static int
67nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
68{
69 struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine);
70 struct nv50_fifo_chan *fctx;
71 struct drm_device *dev = chan->dev;
72 struct drm_nouveau_private *dev_priv = dev->dev_private;
73 u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
74 u64 instance = chan->ramin->vinst >> 12;
75 unsigned long flags;
76 int ret = 0, i;
77
78 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
79 if (!fctx)
80 return -ENOMEM;
81 atomic_inc(&chan->vm->engref[engine]);
82
83 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
84 NV50_USER(chan->id), PAGE_SIZE);
85 if (!chan->user) {
86 ret = -ENOMEM;
87 goto error;
88 }
89
90 for (i = 0; i < 0x100; i += 4)
91 nv_wo32(chan->ramin, i, 0x00000000);
92 nv_wo32(chan->ramin, 0x3c, 0x403f6078);
93 nv_wo32(chan->ramin, 0x40, 0x00000000);
94 nv_wo32(chan->ramin, 0x44, 0x01003fff);
95 nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4);
96 nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
97 nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
98 drm_order(chan->dma.ib_max + 1) << 16);
99 nv_wo32(chan->ramin, 0x60, 0x7fffffff);
100 nv_wo32(chan->ramin, 0x78, 0x00000000);
101 nv_wo32(chan->ramin, 0x7c, 0x30000001);
102 nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
103 (4 << 24) /* SEARCH_FULL */ |
104 (chan->ramht->gpuobj->cinst >> 4));
105
106 dev_priv->engine.instmem.flush(dev);
107
108 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
109 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
110 nv50_fifo_playlist_update(dev);
111 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
112
113error:
114 if (ret)
115 priv->base.base.context_del(chan, engine);
116 return ret;
117}
118
119static bool
120nv50_fifo_kickoff(struct nouveau_channel *chan)
121{
122 struct drm_device *dev = chan->dev;
123 bool done = true;
124 u32 me;
125
126 /* HW bug workaround:
127 *
128 * PFIFO will hang forever if the connected engines don't report
129 * that they've processed the context switch request.
130 *
131 * In order for the kickoff to work, we need to ensure all the
132 * connected engines are in a state where they can answer.
133 *
 134	 * Newer chipsets don't seem to suffer from this issue, and there's
 135	 * also an "ignore these engines" bitmask reg we can use if we hit
 136	 * the issue there.
137 */
138
139 /* PME: make sure engine is enabled */
140 me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
141
142 /* do the kickoff... */
143 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
144 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
145 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
146 done = false;
147 }
148
149 /* restore any engine states we changed, and exit */
150 nv_wr32(dev, 0x00b860, me);
151 return done;
152}
153
154static void
155nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
156{
157 struct nv50_fifo_chan *fctx = chan->engctx[engine];
158 struct drm_device *dev = chan->dev;
159 struct drm_nouveau_private *dev_priv = dev->dev_private;
160 unsigned long flags;
161
162 /* remove channel from playlist, will context switch if active */
163 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
164 nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
165 nv50_fifo_playlist_update(dev);
166
167 /* tell any engines on this channel to unload their contexts */
168 nv50_fifo_kickoff(chan);
169
170 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
171 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
172
173 /* clean up */
174 if (chan->user) {
175 iounmap(chan->user);
176 chan->user = NULL;
177 }
178
179 atomic_dec(&chan->vm->engref[engine]);
180 chan->engctx[engine] = NULL;
181 kfree(fctx);
182}
183
184static int
185nv50_fifo_init(struct drm_device *dev, int engine)
186{
187 struct drm_nouveau_private *dev_priv = dev->dev_private;
188 u32 instance;
189 int i;
190
191 nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
192 nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
193 nv_wr32(dev, 0x00250c, 0x6f3cfc34);
194 nv_wr32(dev, 0x002044, 0x01003fff);
195
196 nv_wr32(dev, 0x002100, 0xffffffff);
197 nv_wr32(dev, 0x002140, 0xffffffff);
198
199 for (i = 0; i < 128; i++) {
200 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
201 if (chan && chan->engctx[engine])
202 instance = 0x80000000 | chan->ramin->vinst >> 12;
203 else
204 instance = 0x00000000;
205 nv_wr32(dev, 0x002600 + (i * 4), instance);
206 }
207
208 nv50_fifo_playlist_update(dev);
209
210 nv_wr32(dev, 0x003200, 1);
211 nv_wr32(dev, 0x003250, 1);
212 nv_wr32(dev, 0x002500, 1);
213 return 0;
214}
215
216static int
217nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend)
218{
219 struct drm_nouveau_private *dev_priv = dev->dev_private;
220 struct nv50_fifo_priv *priv = nv_engine(dev, engine);
221 int i;
222
223 /* set playlist length to zero, fifo will unload context */
224 nv_wr32(dev, 0x0032ec, 0);
225
226 /* tell all connected engines to unload their contexts */
227 for (i = 0; i < priv->base.channels; i++) {
228 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
229 if (chan && !nv50_fifo_kickoff(chan))
230 return -EBUSY;
231 }
232
233 nv_wr32(dev, 0x002140, 0);
234 return 0;
235}
236
237void
238nv50_fifo_tlb_flush(struct drm_device *dev, int engine)
239{
240 nv50_vm_flush_engine(dev, 5);
241}
242
243void
244nv50_fifo_destroy(struct drm_device *dev, int engine)
245{
246 struct drm_nouveau_private *dev_priv = dev->dev_private;
247 struct nv50_fifo_priv *priv = nv_engine(dev, engine);
248
249 nouveau_irq_unregister(dev, 8);
250
251 nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
252 nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
253
254 dev_priv->eng[engine] = NULL;
255 kfree(priv);
256}
257
258int
259nv50_fifo_create(struct drm_device *dev)
260{
261 struct drm_nouveau_private *dev_priv = dev->dev_private;
262 struct nv50_fifo_priv *priv;
263 int ret;
264
265 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
266 if (!priv)
267 return -ENOMEM;
268
269 priv->base.base.destroy = nv50_fifo_destroy;
270 priv->base.base.init = nv50_fifo_init;
271 priv->base.base.fini = nv50_fifo_fini;
272 priv->base.base.context_new = nv50_fifo_context_new;
273 priv->base.base.context_del = nv50_fifo_context_del;
274 priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
275 priv->base.channels = 127;
276 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
277
278 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
279 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
280 if (ret)
281 goto error;
282
283 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
284 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
285 if (ret)
286 goto error;
287
288 nouveau_irq_register(dev, 8, nv04_fifo_isr);
289error:
290 if (ret)
291 priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
292 return ret;
293}
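nv50_fifo_playlist_update() above alternates between two playlist gpuobjs: each call grabs the buffer at cur_playlist, toggles the index, fills the buffer with the IDs of all enabled channels (0x002600 entries with bit 31 set), then points the hardware at it via 0x0032f4/0x0032ec and triggers the reload through 0x002500. The buffer flip itself reduces to the following standalone sketch (struct and helper are hypothetical models, not driver code):

	#include <stdint.h>

	struct playlist { uint32_t chan[128]; int count; };

	/* take the buffer at the current index, then toggle the index --
	 * the same flip as priv->cur_playlist = !priv->cur_playlist above */
	static struct playlist *pick_playlist(struct playlist pl[2], int *cur)
	{
		struct playlist *p = &pl[*cur];
		*cur = !*cur;
		p->count = 0;	/* caller refills it from scratch */
		return p;
	}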
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
deleted file mode 100644
index c86a5fcc5e69..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ /dev/null
@@ -1,155 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/dmi.h>
26#include <drm/drmP.h>
27#include "nouveau_drv.h"
28#include "nouveau_hw.h"
29#include "nouveau_gpio.h"
30
31#include "nv50_display.h"
32
33static int
34nv50_gpio_location(int line, u32 *reg, u32 *shift)
35{
36 const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
37
38 if (line >= 32)
39 return -EINVAL;
40
41 *reg = nv50_gpio_reg[line >> 3];
42 *shift = (line & 7) << 2;
43 return 0;
44}
45
46int
47nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out)
48{
49 u32 reg, shift;
50
51 if (nv50_gpio_location(line, &reg, &shift))
52 return -EINVAL;
53
54 nv_mask(dev, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
55 return 0;
56}
57
58int
59nv50_gpio_sense(struct drm_device *dev, int line)
60{
61 u32 reg, shift;
62
63 if (nv50_gpio_location(line, &reg, &shift))
64 return -EINVAL;
65
66 return !!(nv_rd32(dev, reg) & (4 << shift));
67}
68
69void
70nv50_gpio_irq_enable(struct drm_device *dev, int line, bool on)
71{
72 u32 reg = line < 16 ? 0xe050 : 0xe070;
73 u32 mask = 0x00010001 << (line & 0xf);
74
75 nv_wr32(dev, reg + 4, mask);
76 nv_mask(dev, reg + 0, mask, on ? mask : 0);
77}
78
79int
80nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out)
81{
82 u32 data = ((dir ^ 1) << 13) | (out << 12);
83 nv_mask(dev, 0x00d610 + (line * 4), 0x00003000, data);
84 nv_mask(dev, 0x00d604, 0x00000001, 0x00000001); /* update? */
85 return 0;
86}
87
88int
89nvd0_gpio_sense(struct drm_device *dev, int line)
90{
91 return !!(nv_rd32(dev, 0x00d610 + (line * 4)) & 0x00004000);
92}
93
94static void
95nv50_gpio_isr(struct drm_device *dev)
96{
97 struct drm_nouveau_private *dev_priv = dev->dev_private;
98 u32 intr0, intr1 = 0;
99 u32 hi, lo;
100
101 intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
102 if (dev_priv->chipset >= 0x90)
103 intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
104
105 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
106 lo = (intr0 >> 16) | (intr1 & 0xffff0000);
107 nouveau_gpio_isr(dev, 0, hi | lo);
108
109 nv_wr32(dev, 0xe054, intr0);
110 if (dev_priv->chipset >= 0x90)
111 nv_wr32(dev, 0xe074, intr1);
112}
113
114static struct dmi_system_id gpio_reset_ids[] = {
115 {
116 .ident = "Apple Macbook 10,1",
117 .matches = {
118 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
119 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
120 }
121 },
122 { }
123};
124
125int
126nv50_gpio_init(struct drm_device *dev)
127{
128 struct drm_nouveau_private *dev_priv = dev->dev_private;
129
130 /* initialise gpios and routing to vbios defaults */
131 if (dmi_check_system(gpio_reset_ids))
132 nouveau_gpio_reset(dev);
133
134 /* disable, and ack any pending gpio interrupts */
135 nv_wr32(dev, 0xe050, 0x00000000);
136 nv_wr32(dev, 0xe054, 0xffffffff);
137 if (dev_priv->chipset >= 0x90) {
138 nv_wr32(dev, 0xe070, 0x00000000);
139 nv_wr32(dev, 0xe074, 0xffffffff);
140 }
141
142 nouveau_irq_register(dev, 21, nv50_gpio_isr);
143 return 0;
144}
145
146void
147nv50_gpio_fini(struct drm_device *dev)
148{
149 struct drm_nouveau_private *dev_priv = dev->dev_private;
150
151 nv_wr32(dev, 0xe050, 0x00000000);
152 if (dev_priv->chipset >= 0x90)
153 nv_wr32(dev, 0xe070, 0x00000000);
154 nouveau_irq_unregister(dev, 21);
155}
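nv50_gpio_location() above packs eight GPIO lines into each 32-bit register, four control bits per line: line >> 3 selects one of the four registers and (line & 7) << 2 is the bit offset of that line's nibble. Within the nibble, nv50_gpio_drive() writes bit 0 as the output level and bit 1 as the inverted direction, while nv50_gpio_sense() reads bit 2 as the input state. A self-contained restatement of the addressing math (function name hypothetical):

	#include <stdint.h>

	static const uint32_t nv50_gpio_reg[4] = {
		0xe104, 0xe108, 0xe280, 0xe284
	};

	/* 8 lines per register, 4 bits per line: bit 0 = out, bit 1 = !dir,
	 * bit 2 = sense, per the drive/sense helpers above */
	static int gpio_location(int line, uint32_t *reg, uint32_t *shift)
	{
		if (line >= 32)
			return -1;
		*reg   = nv50_gpio_reg[line >> 3];
		*shift = (uint32_t)(line & 7) << 2;
		return 0;
	}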
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
deleted file mode 100644
index f8a9c8095297..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ /dev/null
@@ -1,867 +0,0 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include "nouveau_drv.h"
29#include "nouveau_fifo.h"
30#include "nouveau_ramht.h"
31#include "nouveau_dma.h"
32#include "nouveau_vm.h"
33#include "nv50_evo.h"
34
35struct nv50_graph_engine {
36 struct nouveau_exec_engine base;
37 u32 ctxprog[512];
38 u32 ctxprog_size;
39 u32 grctx_size;
40};
41
42static int
43nv50_graph_init(struct drm_device *dev, int engine)
44{
45 struct drm_nouveau_private *dev_priv = dev->dev_private;
46 struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
47 u32 units = nv_rd32(dev, 0x001540);
48 int i;
49
50 NV_DEBUG(dev, "\n");
51
52 /* master reset */
53 nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
54 nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
55 nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
56
57 /* reset/enable traps and interrupts */
58 nv_wr32(dev, 0x400804, 0xc0000000);
59 nv_wr32(dev, 0x406800, 0xc0000000);
60 nv_wr32(dev, 0x400c04, 0xc0000000);
61 nv_wr32(dev, 0x401800, 0xc0000000);
62 nv_wr32(dev, 0x405018, 0xc0000000);
63 nv_wr32(dev, 0x402000, 0xc0000000);
64 for (i = 0; i < 16; i++) {
65 if (!(units & (1 << i)))
66 continue;
67
68 if (dev_priv->chipset < 0xa0) {
69 nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
70 nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
71 nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
72 } else {
73 nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
74 nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
75 nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
76 }
77 }
78
79 nv_wr32(dev, 0x400108, 0xffffffff);
80 nv_wr32(dev, 0x400138, 0xffffffff);
81 nv_wr32(dev, 0x400100, 0xffffffff);
82 nv_wr32(dev, 0x40013c, 0xffffffff);
83 nv_wr32(dev, 0x400500, 0x00010001);
84
85 /* upload context program, initialise ctxctl defaults */
86 nv_wr32(dev, 0x400324, 0x00000000);
87 for (i = 0; i < pgraph->ctxprog_size; i++)
88 nv_wr32(dev, 0x400328, pgraph->ctxprog[i]);
89 nv_wr32(dev, 0x400824, 0x00000000);
90 nv_wr32(dev, 0x400828, 0x00000000);
91 nv_wr32(dev, 0x40082c, 0x00000000);
92 nv_wr32(dev, 0x400830, 0x00000000);
93 nv_wr32(dev, 0x400724, 0x00000000);
94 nv_wr32(dev, 0x40032c, 0x00000000);
95 nv_wr32(dev, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */
96
97 /* some unknown zcull magic */
98 switch (dev_priv->chipset & 0xf0) {
99 case 0x50:
100 case 0x80:
101 case 0x90:
102 nv_wr32(dev, 0x402ca8, 0x00000800);
103 break;
104 case 0xa0:
105 default:
106 nv_wr32(dev, 0x402cc0, 0x00000000);
107 if (dev_priv->chipset == 0xa0 ||
108 dev_priv->chipset == 0xaa ||
109 dev_priv->chipset == 0xac) {
110 nv_wr32(dev, 0x402ca8, 0x00000802);
111 } else {
112 nv_wr32(dev, 0x402cc0, 0x00000000);
113 nv_wr32(dev, 0x402ca8, 0x00000002);
114 }
115
116 break;
117 }
118
119 /* zero out zcull regions */
120 for (i = 0; i < 8; i++) {
121 nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
122 nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
123 nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
124 nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
125 }
126
127 return 0;
128}
129
130static int
131nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
132{
133 nv_wr32(dev, 0x40013c, 0x00000000);
134 return 0;
135}
136
137static int
138nv50_graph_context_new(struct nouveau_channel *chan, int engine)
139{
140 struct drm_device *dev = chan->dev;
141 struct drm_nouveau_private *dev_priv = dev->dev_private;
142 struct nouveau_gpuobj *ramin = chan->ramin;
143 struct nouveau_gpuobj *grctx = NULL;
144 struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
145 int hdr, ret;
146
147 NV_DEBUG(dev, "ch%d\n", chan->id);
148
149 ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
150 NVOBJ_FLAG_ZERO_ALLOC |
151 NVOBJ_FLAG_ZERO_FREE, &grctx);
152 if (ret)
153 return ret;
154
155 hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
156 nv_wo32(ramin, hdr + 0x00, 0x00190002);
157 nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
158 nv_wo32(ramin, hdr + 0x08, grctx->vinst);
159 nv_wo32(ramin, hdr + 0x0c, 0);
160 nv_wo32(ramin, hdr + 0x10, 0);
161 nv_wo32(ramin, hdr + 0x14, 0x00010000);
162
163 nv50_grctx_fill(dev, grctx);
164 nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
165
166 dev_priv->engine.instmem.flush(dev);
167
168 atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
169 chan->engctx[NVOBJ_ENGINE_GR] = grctx;
170 return 0;
171}
172
173static void
174nv50_graph_context_del(struct nouveau_channel *chan, int engine)
175{
176 struct nouveau_gpuobj *grctx = chan->engctx[engine];
177 struct drm_device *dev = chan->dev;
178 struct drm_nouveau_private *dev_priv = dev->dev_private;
179 int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
180
181 for (i = hdr; i < hdr + 24; i += 4)
182 nv_wo32(chan->ramin, i, 0);
183 dev_priv->engine.instmem.flush(dev);
184
185 atomic_dec(&chan->vm->engref[engine]);
186 nouveau_gpuobj_ref(NULL, &grctx);
187 chan->engctx[engine] = NULL;
188}
189
190static int
191nv50_graph_object_new(struct nouveau_channel *chan, int engine,
192 u32 handle, u16 class)
193{
194 struct drm_device *dev = chan->dev;
195 struct drm_nouveau_private *dev_priv = dev->dev_private;
196 struct nouveau_gpuobj *obj = NULL;
197 int ret;
198
199 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
200 if (ret)
201 return ret;
202 obj->engine = 1;
203 obj->class = class;
204
205 nv_wo32(obj, 0x00, class);
206 nv_wo32(obj, 0x04, 0x00000000);
207 nv_wo32(obj, 0x08, 0x00000000);
208 nv_wo32(obj, 0x0c, 0x00000000);
209 dev_priv->engine.instmem.flush(dev);
210
211 ret = nouveau_ramht_insert(chan, handle, obj);
212 nouveau_gpuobj_ref(NULL, &obj);
213 return ret;
214}
215
216static void
217nv50_graph_tlb_flush(struct drm_device *dev, int engine)
218{
219 nv50_vm_flush_engine(dev, 0);
220}
221
222static void
223nv84_graph_tlb_flush(struct drm_device *dev, int engine)
224{
225 struct drm_nouveau_private *dev_priv = dev->dev_private;
226 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
227 bool idle, timeout = false;
228 unsigned long flags;
229 u64 start;
230 u32 tmp;
231
232 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
233 nv_mask(dev, 0x400500, 0x00000001, 0x00000000);
234
235 start = ptimer->read(dev);
236 do {
237 idle = true;
238
239 for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
240 if ((tmp & 7) == 1)
241 idle = false;
242 }
243
244 for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
245 if ((tmp & 7) == 1)
246 idle = false;
247 }
248
249 for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
250 if ((tmp & 7) == 1)
251 idle = false;
252 }
253 } while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));
254
255 if (timeout) {
256 NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
257 "0x%08x 0x%08x 0x%08x 0x%08x\n",
258 nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
259 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
260 }
261
262 nv50_vm_flush_engine(dev, 0);
263
264 nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
265 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
266}
267
268static struct nouveau_enum nv50_mp_exec_error_names[] = {
269 { 3, "STACK_UNDERFLOW", NULL },
270 { 4, "QUADON_ACTIVE", NULL },
271 { 8, "TIMEOUT", NULL },
272 { 0x10, "INVALID_OPCODE", NULL },
273 { 0x40, "BREAKPOINT", NULL },
274 {}
275};
276
277static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
278 { 0x00000001, "NOTIFY" },
279 { 0x00000002, "IN" },
280 { 0x00000004, "OUT" },
281 {}
282};
283
284static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
285 { 0x00000001, "FAULT" },
286 {}
287};
288
289static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
290 { 0x00000001, "FAULT" },
291 {}
292};
293
294static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
295 { 0x00000001, "FAULT" },
296 {}
297};
298
299/* There must be a *lot* of these. Will take some time to gather them up. */
300struct nouveau_enum nv50_data_error_names[] = {
301 { 0x00000003, "INVALID_OPERATION", NULL },
302 { 0x00000004, "INVALID_VALUE", NULL },
303 { 0x00000005, "INVALID_ENUM", NULL },
304 { 0x00000008, "INVALID_OBJECT", NULL },
305 { 0x00000009, "READ_ONLY_OBJECT", NULL },
306 { 0x0000000a, "SUPERVISOR_OBJECT", NULL },
307 { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
308 { 0x0000000c, "INVALID_BITFIELD", NULL },
309 { 0x0000000d, "BEGIN_END_ACTIVE", NULL },
310 { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
311 { 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
312 { 0x00000010, "RT_DOUBLE_BIND", NULL },
313 { 0x00000011, "RT_TYPES_MISMATCH", NULL },
314 { 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
315 { 0x00000015, "FP_TOO_FEW_REGS", NULL },
316 { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
317 { 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
318 { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
319 { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
320 { 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
321 { 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
322 { 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
323 { 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
324 { 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
325 { 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
326 { 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
327 { 0x00000024, "VP_ZERO_INPUTS", NULL },
328 { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
329 { 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
330 { 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
331 { 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
332 { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
333 { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
334 { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
335 { 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
336 { 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
337 { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
338 { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
339 { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
340 { 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
341 { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
342 { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
343 {}
344};
345
346static struct nouveau_bitfield nv50_graph_intr[] = {
347 { 0x00000001, "NOTIFY" },
348 { 0x00000002, "COMPUTE_QUERY" },
349 { 0x00000010, "ILLEGAL_MTHD" },
350 { 0x00000020, "ILLEGAL_CLASS" },
351 { 0x00000040, "DOUBLE_NOTIFY" },
352 { 0x00001000, "CONTEXT_SWITCH" },
353 { 0x00010000, "BUFFER_NOTIFY" },
354 { 0x00100000, "DATA_ERROR" },
355 { 0x00200000, "TRAP" },
356 { 0x01000000, "SINGLE_STEP" },
357 {}
358};
359
360static void
361nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
362{
363 struct drm_nouveau_private *dev_priv = dev->dev_private;
364 uint32_t units = nv_rd32(dev, 0x1540);
365 uint32_t addr, mp10, status, pc, oplow, ophigh;
366 int i;
367 int mps = 0;
368 for (i = 0; i < 4; i++) {
369 if (!(units & 1 << (i+24)))
370 continue;
371 if (dev_priv->chipset < 0xa0)
372 addr = 0x408200 + (tpid << 12) + (i << 7);
373 else
374 addr = 0x408100 + (tpid << 11) + (i << 7);
375 mp10 = nv_rd32(dev, addr + 0x10);
376 status = nv_rd32(dev, addr + 0x14);
377 if (!status)
378 continue;
379 if (display) {
380 nv_rd32(dev, addr + 0x20);
381 pc = nv_rd32(dev, addr + 0x24);
382 oplow = nv_rd32(dev, addr + 0x70);
383 ophigh = nv_rd32(dev, addr + 0x74);
384 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
385 "TP %d MP %d: ", tpid, i);
386 nouveau_enum_print(nv50_mp_exec_error_names, status);
387 printk(" at %06x warp %d, opcode %08x %08x\n",
388 pc&0xffffff, pc >> 24,
389 oplow, ophigh);
390 }
391 nv_wr32(dev, addr + 0x10, mp10);
392 nv_wr32(dev, addr + 0x14, 0);
393 mps++;
394 }
395 if (!mps && display)
396 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
397 "No MPs claiming errors?\n", tpid);
398}
399
400static void
401nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
402 uint32_t ustatus_new, int display, const char *name)
403{
404 struct drm_nouveau_private *dev_priv = dev->dev_private;
405 int tps = 0;
406 uint32_t units = nv_rd32(dev, 0x1540);
407 int i, r;
408 uint32_t ustatus_addr, ustatus;
409 for (i = 0; i < 16; i++) {
410 if (!(units & (1 << i)))
411 continue;
412 if (dev_priv->chipset < 0xa0)
413 ustatus_addr = ustatus_old + (i << 12);
414 else
415 ustatus_addr = ustatus_new + (i << 11);
416 ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
417 if (!ustatus)
418 continue;
419 tps++;
420 switch (type) {
421 case 6: /* texture error... unknown for now */
422 if (display) {
423 NV_ERROR(dev, "magic set %d:\n", i);
424 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
425 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
426 nv_rd32(dev, r));
427 }
428 break;
429 case 7: /* MP error */
430 if (ustatus & 0x04030000) {
431 nv50_pgraph_mp_trap(dev, i, display);
432 ustatus &= ~0x04030000;
433 }
434 break;
435 case 8: /* TPDMA error */
436 {
437 uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
438 uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
439 uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
440 uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
441 uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
442 uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
443 uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
444 /* 2d engine destination */
445 if (ustatus & 0x00000010) {
446 if (display) {
447 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
448 i, e14, e10);
449 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
450 i, e0c, e18, e1c, e20, e24);
451 }
452 ustatus &= ~0x00000010;
453 }
454 /* Render target */
455 if (ustatus & 0x00000040) {
456 if (display) {
457 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
458 i, e14, e10);
459 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
460 i, e0c, e18, e1c, e20, e24);
461 }
462 ustatus &= ~0x00000040;
463 }
464 /* CUDA memory: l[], g[] or stack. */
465 if (ustatus & 0x00000080) {
466 if (display) {
467 if (e18 & 0x80000000) {
468 /* g[] read fault? */
469 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
470 i, e14, e10 | ((e18 >> 24) & 0x1f));
471 e18 &= ~0x1f000000;
472 } else if (e18 & 0xc) {
473 /* g[] write fault? */
474 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
475 i, e14, e10 | ((e18 >> 7) & 0x1f));
476 e18 &= ~0x00000f80;
477 } else {
478 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
479 i, e14, e10);
480 }
481 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
482 i, e0c, e18, e1c, e20, e24);
483 }
484 ustatus &= ~0x00000080;
485 }
486 }
487 break;
488 }
489 if (ustatus) {
490 if (display)
491 NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
492 }
493 nv_wr32(dev, ustatus_addr, 0xc0000000);
494 }
495
496 if (!tps && display)
497 NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
498}
499
500static int
501nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
502{
503 u32 status = nv_rd32(dev, 0x400108);
504 u32 ustatus;
505
506 if (!status && display) {
507 NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
508 return 1;
509 }
510
511 /* DISPATCH: Relays commands to other units and handles NOTIFY,
512 * COND, QUERY. If you get a trap from it, the command is still stuck
513 * in DISPATCH and you need to do something about it. */
514 if (status & 0x001) {
515 ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
516 if (!ustatus && display) {
517 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
518 }
519
520 nv_wr32(dev, 0x400500, 0x00000000);
521
522 /* Known to be triggered by screwed up NOTIFY and COND... */
523 if (ustatus & 0x00000001) {
524 u32 addr = nv_rd32(dev, 0x400808);
525 u32 subc = (addr & 0x00070000) >> 16;
526 u32 mthd = (addr & 0x00001ffc);
527 u32 datal = nv_rd32(dev, 0x40080c);
528 u32 datah = nv_rd32(dev, 0x400810);
529 u32 class = nv_rd32(dev, 0x400814);
530 u32 r848 = nv_rd32(dev, 0x400848);
531
532 NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
533 if (display && (addr & 0x80000000)) {
534 NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
535 "subc %d class 0x%04x mthd 0x%04x "
536 "data 0x%08x%08x "
537 "400808 0x%08x 400848 0x%08x\n",
538 chid, inst, subc, class, mthd, datah,
539 datal, addr, r848);
540 } else
541 if (display) {
542 NV_INFO(dev, "PGRAPH - no stuck command?\n");
543 }
544
545 nv_wr32(dev, 0x400808, 0);
546 nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
547 nv_wr32(dev, 0x400848, 0);
548 ustatus &= ~0x00000001;
549 }
550
551 if (ustatus & 0x00000002) {
552 u32 addr = nv_rd32(dev, 0x40084c);
553 u32 subc = (addr & 0x00070000) >> 16;
554 u32 mthd = (addr & 0x00001ffc);
555 u32 data = nv_rd32(dev, 0x40085c);
556 u32 class = nv_rd32(dev, 0x400814);
557
558 NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
559 if (display && (addr & 0x80000000)) {
560 NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
561 "subc %d class 0x%04x mthd 0x%04x "
562 "data 0x%08x 40084c 0x%08x\n",
563 chid, inst, subc, class, mthd,
564 data, addr);
565 } else
566 if (display) {
567 NV_INFO(dev, "PGRAPH - no stuck command?\n");
568 }
569
570 nv_wr32(dev, 0x40084c, 0);
571 ustatus &= ~0x00000002;
572 }
573
574 if (ustatus && display) {
575 NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
576 "0x%08x)\n", ustatus);
577 }
578
579 nv_wr32(dev, 0x400804, 0xc0000000);
580 nv_wr32(dev, 0x400108, 0x001);
581 status &= ~0x001;
582 if (!status)
583 return 0;
584 }
585
586 /* M2MF: Memory to memory copy engine. */
587 if (status & 0x002) {
588 u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
589 if (display) {
590 NV_INFO(dev, "PGRAPH - TRAP_M2MF");
591 nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
592 printk("\n");
593 NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
594 nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
595 nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
596
597 }
598
599 /* No sane way found yet -- just reset the bugger. */
600 nv_wr32(dev, 0x400040, 2);
601 nv_wr32(dev, 0x400040, 0);
602 nv_wr32(dev, 0x406800, 0xc0000000);
603 nv_wr32(dev, 0x400108, 0x002);
604 status &= ~0x002;
605 }
606
607 /* VFETCH: Fetches data from vertex buffers. */
608 if (status & 0x004) {
609 u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
610 if (display) {
611 NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
612 nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
613 printk("\n");
614 NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
615 nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
616 nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
617 }
618
619 nv_wr32(dev, 0x400c04, 0xc0000000);
620 nv_wr32(dev, 0x400108, 0x004);
621 status &= ~0x004;
622 }
623
624 /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
625 if (status & 0x008) {
626 ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
627 if (display) {
628 NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
629 nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
630 printk("\n");
631 NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
632 nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
633 nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
634
635 }
636
637 /* No sane way found yet -- just reset the bugger. */
638 nv_wr32(dev, 0x400040, 0x80);
639 nv_wr32(dev, 0x400040, 0);
640 nv_wr32(dev, 0x401800, 0xc0000000);
641 nv_wr32(dev, 0x400108, 0x008);
642 status &= ~0x008;
643 }
644
645 /* CCACHE: Handles code and c[] caches and fills them. */
646 if (status & 0x010) {
647 ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
648 if (display) {
649 NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
650 nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
651 printk("\n");
652 NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
653 " %08x %08x %08x\n",
654 nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
655 nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
656 nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
657 nv_rd32(dev, 0x40501c));
658
659 }
660
661 nv_wr32(dev, 0x405018, 0xc0000000);
662 nv_wr32(dev, 0x400108, 0x010);
663 status &= ~0x010;
664 }
665
666 /* Unknown, not seen yet... 0x402000 is the only trap status reg
667 * remaining, so try to handle it anyway. Perhaps related to that
668 * unknown DMA slot on tesla? */
669 if (status & 0x20) {
670 ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
671 if (display)
672 NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
673 nv_wr32(dev, 0x402000, 0xc0000000);
 674			/* no status modification on purpose */
675 }
676
677 /* TEXTURE: CUDA texturing units */
678 if (status & 0x040) {
679 nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
680 "PGRAPH - TRAP_TEXTURE");
681 nv_wr32(dev, 0x400108, 0x040);
682 status &= ~0x040;
683 }
684
685 /* MP: CUDA execution engines. */
686 if (status & 0x080) {
687 nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
688 "PGRAPH - TRAP_MP");
689 nv_wr32(dev, 0x400108, 0x080);
690 status &= ~0x080;
691 }
692
693 /* TPDMA: Handles TP-initiated uncached memory accesses:
694 * l[], g[], stack, 2d surfaces, render targets. */
695 if (status & 0x100) {
696 nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
697 "PGRAPH - TRAP_TPDMA");
698 nv_wr32(dev, 0x400108, 0x100);
699 status &= ~0x100;
700 }
701
702 if (status) {
703 if (display)
704 NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
705 nv_wr32(dev, 0x400108, status);
706 }
707
708 return 1;
709}
710
711int
712nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
713{
714 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
715 struct drm_nouveau_private *dev_priv = dev->dev_private;
716 struct nouveau_channel *chan;
717 unsigned long flags;
718 int i;
719
720 spin_lock_irqsave(&dev_priv->channels.lock, flags);
721 for (i = 0; i < pfifo->channels; i++) {
722 chan = dev_priv->channels.ptr[i];
723 if (!chan || !chan->ramin)
724 continue;
725
726 if (inst == chan->ramin->vinst)
727 break;
728 }
729 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
730 return i;
731}
732
733static void
734nv50_graph_isr(struct drm_device *dev)
735{
736 u32 stat;
737
738 while ((stat = nv_rd32(dev, 0x400100))) {
739 u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
740 u32 chid = nv50_graph_isr_chid(dev, inst);
741 u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
742 u32 subc = (addr & 0x00070000) >> 16;
743 u32 mthd = (addr & 0x00001ffc);
744 u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
745 u32 class = nv_rd32(dev, 0x400814);
746 u32 show = stat;
747
748 if (stat & 0x00000010) {
749 if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
750 mthd, data))
751 show &= ~0x00000010;
752 }
753
754 show = (show && nouveau_ratelimit()) ? show : 0;
755
756 if (show & 0x00100000) {
757 u32 ecode = nv_rd32(dev, 0x400110);
758 NV_INFO(dev, "PGRAPH - DATA_ERROR ");
759 nouveau_enum_print(nv50_data_error_names, ecode);
760 printk("\n");
761 }
762
763 if (stat & 0x00200000) {
764 if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
765 show &= ~0x00200000;
766 }
767
768 nv_wr32(dev, 0x400100, stat);
769 nv_wr32(dev, 0x400500, 0x00010001);
770
771 if (show) {
772 NV_INFO(dev, "PGRAPH -");
773 nouveau_bitfield_print(nv50_graph_intr, show);
774 printk("\n");
775 NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
776 "class 0x%04x mthd 0x%04x data 0x%08x\n",
777 chid, inst, subc, class, mthd, data);
778 nv50_fb_vm_trap(dev, 1);
779 }
780 }
781
782 if (nv_rd32(dev, 0x400824) & (1 << 31))
783 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
784}
785
786static void
787nv50_graph_destroy(struct drm_device *dev, int engine)
788{
789 struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
790
791 NVOBJ_ENGINE_DEL(dev, GR);
792
793 nouveau_irq_unregister(dev, 12);
794 kfree(pgraph);
795}
796
797int
798nv50_graph_create(struct drm_device *dev)
799{
800 struct drm_nouveau_private *dev_priv = dev->dev_private;
801 struct nv50_graph_engine *pgraph;
802 int ret;
803
 804	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
805 if (!pgraph)
806 return -ENOMEM;
807
808 ret = nv50_grctx_init(dev, pgraph->ctxprog, ARRAY_SIZE(pgraph->ctxprog),
809 &pgraph->ctxprog_size,
810 &pgraph->grctx_size);
811 if (ret) {
812 NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
813 kfree(pgraph);
814 return 0;
815 }
816
817 pgraph->base.destroy = nv50_graph_destroy;
818 pgraph->base.init = nv50_graph_init;
819 pgraph->base.fini = nv50_graph_fini;
820 pgraph->base.context_new = nv50_graph_context_new;
821 pgraph->base.context_del = nv50_graph_context_del;
822 pgraph->base.object_new = nv50_graph_object_new;
823 if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
824 pgraph->base.tlb_flush = nv50_graph_tlb_flush;
825 else
826 pgraph->base.tlb_flush = nv84_graph_tlb_flush;
827
828 nouveau_irq_register(dev, 12, nv50_graph_isr);
829
830 NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
831 NVOBJ_CLASS(dev, 0x0030, GR); /* null */
832 NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
833 NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
834
835 /* tesla */
836 if (dev_priv->chipset == 0x50)
837 NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
838 else
839 if (dev_priv->chipset < 0xa0)
840 NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
841 else {
842 switch (dev_priv->chipset) {
843 case 0xa0:
844 case 0xaa:
845 case 0xac:
846 NVOBJ_CLASS(dev, 0x8397, GR);
847 break;
848 case 0xa3:
849 case 0xa5:
850 case 0xa8:
851 NVOBJ_CLASS(dev, 0x8597, GR);
852 break;
853 case 0xaf:
854 NVOBJ_CLASS(dev, 0x8697, GR);
855 break;
856 }
857 }
858
859 /* compute */
860 NVOBJ_CLASS(dev, 0x50c0, GR);
861 if (dev_priv->chipset > 0xa0 &&
862 dev_priv->chipset != 0xaa &&
863 dev_priv->chipset != 0xac)
864 NVOBJ_CLASS(dev, 0x85c0, GR);
865
866 return 0;
867}
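Both nv50_graph_isr() and the DISPATCH trap paths above decode the trapped-address register the same way: bits 16-18 carry the subchannel and bits 2-12 the method offset. A minimal standalone decode of that layout (struct and function names hypothetical):

	#include <stdint.h>

	struct pgraph_addr { uint32_t subc, mthd; };

	/* field extraction as done on NV04_PGRAPH_TRAPPED_ADDR above */
	static struct pgraph_addr decode_trapped_addr(uint32_t addr)
	{
		struct pgraph_addr a = {
			.subc = (addr & 0x00070000) >> 16,
			.mthd =  addr & 0x00001ffc,
		};
		return a;
	}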
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
deleted file mode 100644
index 05eff577f053..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ /dev/null
@@ -1,427 +0,0 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include <drm/drmP.h>
29
30#include "nouveau_drv.h"
31#include "nouveau_vm.h"
32
33#define BAR1_VM_BASE 0x0020000000ULL
34#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
35#define BAR3_VM_BASE 0x0000000000ULL
36#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
37
38struct nv50_instmem_priv {
39 uint32_t save1700[5]; /* 0x1700->0x1710 */
40
41 struct nouveau_gpuobj *bar1_dmaobj;
42 struct nouveau_gpuobj *bar3_dmaobj;
43};
44
45static void
46nv50_channel_del(struct nouveau_channel **pchan)
47{
48 struct nouveau_channel *chan;
49
50 chan = *pchan;
51 *pchan = NULL;
52 if (!chan)
53 return;
54
55 nouveau_gpuobj_ref(NULL, &chan->ramfc);
56 nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
57 nouveau_gpuobj_ref(NULL, &chan->vm_pd);
58 if (drm_mm_initialized(&chan->ramin_heap))
59 drm_mm_takedown(&chan->ramin_heap);
60 nouveau_gpuobj_ref(NULL, &chan->ramin);
61 kfree(chan);
62}
63
64static int
65nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
66 struct nouveau_channel **pchan)
67{
68 struct drm_nouveau_private *dev_priv = dev->dev_private;
69 u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
70 u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
71 struct nouveau_channel *chan;
72 int ret, i;
73
74 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
75 if (!chan)
76 return -ENOMEM;
77 chan->dev = dev;
78
79 ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
80 if (ret) {
81 nv50_channel_del(&chan);
82 return ret;
83 }
84
85 ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size - 0x6000);
86 if (ret) {
87 nv50_channel_del(&chan);
88 return ret;
89 }
90
91 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
92 chan->ramin->pinst + pgd,
93 chan->ramin->vinst + pgd,
94 0x4000, NVOBJ_FLAG_ZERO_ALLOC,
95 &chan->vm_pd);
96 if (ret) {
97 nv50_channel_del(&chan);
98 return ret;
99 }
100
101 for (i = 0; i < 0x4000; i += 8) {
102 nv_wo32(chan->vm_pd, i + 0, 0x00000000);
103 nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
104 }
105
106 ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
107 if (ret) {
108 nv50_channel_del(&chan);
109 return ret;
110 }
111
112 ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
113 chan->ramin->pinst + fc,
114 chan->ramin->vinst + fc, 0x100,
115 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc);
116 if (ret) {
117 nv50_channel_del(&chan);
118 return ret;
119 }
120
121 *pchan = chan;
122 return 0;
123}
124
125int
126nv50_instmem_init(struct drm_device *dev)
127{
128 struct drm_nouveau_private *dev_priv = dev->dev_private;
129 struct nv50_instmem_priv *priv;
130 struct nouveau_channel *chan;
131 struct nouveau_vm *vm;
132 int ret, i;
133 u32 tmp;
134
135 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
136 if (!priv)
137 return -ENOMEM;
138 dev_priv->engine.instmem.priv = priv;
139
140 /* Save state, will restore at takedown. */
141 for (i = 0x1700; i <= 0x1710; i += 4)
142 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
143
144 /* Global PRAMIN heap */
145 ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
146 if (ret) {
147 NV_ERROR(dev, "Failed to init RAMIN heap\n");
148 goto error;
149 }
150
151 /* BAR3 */
152 ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
153 &dev_priv->bar3_vm);
154 if (ret)
155 goto error;
156
157 ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
158 0x1000, NVOBJ_FLAG_DONT_MAP |
159 NVOBJ_FLAG_ZERO_ALLOC,
160 &dev_priv->bar3_vm->pgt[0].obj[0]);
161 if (ret)
162 goto error;
163 dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
164
165 nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
166
167 ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
168 if (ret)
169 goto error;
170 dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;
171
172 ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
173 NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
174 NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
175 &priv->bar3_dmaobj);
176 if (ret)
177 goto error;
178
179 nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
180 nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
181 nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));
182
183 dev_priv->engine.instmem.flush(dev);
184 dev_priv->ramin_available = true;
185
186 tmp = nv_ro32(chan->ramin, 0);
187 nv_wo32(chan->ramin, 0, ~tmp);
188 if (nv_ro32(chan->ramin, 0) != ~tmp) {
189 NV_ERROR(dev, "PRAMIN readback failed\n");
190 ret = -EIO;
191 goto error;
192 }
193 nv_wo32(chan->ramin, 0, tmp);
194
195 /* BAR1 */
196 ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
197 if (ret)
198 goto error;
199
200 ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
201 if (ret)
202 goto error;
203 nouveau_vm_ref(NULL, &vm, NULL);
204
205 ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
206 NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
207 NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
208 &priv->bar1_dmaobj);
209 if (ret)
210 goto error;
211
212 nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
213 for (i = 0; i < 8; i++)
214 nv_wr32(dev, 0x1900 + (i*4), 0);
215
216 /* Create shared channel VM, space is reserved at the beginning
217 * to catch "NULL pointer" references
218 */
219 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
220 &dev_priv->chan_vm);
221 if (ret)
222 return ret;
223
224 return 0;
225
226error:
227 nv50_instmem_takedown(dev);
228 return ret;
229}
230
231void
232nv50_instmem_takedown(struct drm_device *dev)
233{
234 struct drm_nouveau_private *dev_priv = dev->dev_private;
235 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
236 struct nouveau_channel *chan = dev_priv->channels.ptr[0];
237 int i;
238
239 NV_DEBUG(dev, "\n");
240
241 if (!priv)
242 return;
243
244 dev_priv->ramin_available = false;
245
246 nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
247
248 for (i = 0x1700; i <= 0x1710; i += 4)
249 nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
250
251 nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
252 nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);
253
254 nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
255 dev_priv->channels.ptr[127] = 0;
256 nv50_channel_del(&dev_priv->channels.ptr[0]);
257
258 nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
259 nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
260
261 if (drm_mm_initialized(&dev_priv->ramin_heap))
262 drm_mm_takedown(&dev_priv->ramin_heap);
263
264 dev_priv->engine.instmem.priv = NULL;
265 kfree(priv);
266}
267
268int
269nv50_instmem_suspend(struct drm_device *dev)
270{
271 struct drm_nouveau_private *dev_priv = dev->dev_private;
272
273 dev_priv->ramin_available = false;
274 return 0;
275}
276
277void
278nv50_instmem_resume(struct drm_device *dev)
279{
280 struct drm_nouveau_private *dev_priv = dev->dev_private;
281 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
282 struct nouveau_channel *chan = dev_priv->channels.ptr[0];
283 int i;
284
285 /* Poke the relevant regs, and pray it works :) */
286 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
287 nv_wr32(dev, NV50_PUNK_UNK1710, 0);
288 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
289 NV50_PUNK_BAR_CFG_BASE_VALID);
290 nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
291 NV50_PUNK_BAR1_CTXDMA_VALID);
292 nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
293 NV50_PUNK_BAR3_CTXDMA_VALID);
294
295 for (i = 0; i < 8; i++)
296 nv_wr32(dev, 0x1900 + (i*4), 0);
297
298 dev_priv->ramin_available = true;
299}
300
301struct nv50_gpuobj_node {
302 struct nouveau_mem *vram;
303 struct nouveau_vma chan_vma;
304 u32 align;
305};
306
307int
308nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
309 u32 size, u32 align)
310{
311 struct drm_device *dev = gpuobj->dev;
312 struct drm_nouveau_private *dev_priv = dev->dev_private;
313 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
314 struct nv50_gpuobj_node *node = NULL;
315 int ret;
316
317 node = kzalloc(sizeof(*node), GFP_KERNEL);
318 if (!node)
319 return -ENOMEM;
320 node->align = align;
321
322 size = (size + 4095) & ~4095;
323 align = max(align, (u32)4096);
324
325 ret = vram->get(dev, size, align, 0, 0, &node->vram);
326 if (ret) {
327 kfree(node);
328 return ret;
329 }
330
331 gpuobj->vinst = node->vram->offset;
332
333 if (gpuobj->flags & NVOBJ_FLAG_VM) {
334 u32 flags = NV_MEM_ACCESS_RW;
335 if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
336 flags |= NV_MEM_ACCESS_SYS;
337
338 ret = nouveau_vm_get(chan->vm, size, 12, flags,
339 &node->chan_vma);
340 if (ret) {
341 vram->put(dev, &node->vram);
342 kfree(node);
343 return ret;
344 }
345
346 nouveau_vm_map(&node->chan_vma, node->vram);
347 gpuobj->linst = node->chan_vma.offset;
348 }
349
350 gpuobj->size = size;
351 gpuobj->node = node;
352 return 0;
353}
354
355void
356nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
357{
358 struct drm_device *dev = gpuobj->dev;
359 struct drm_nouveau_private *dev_priv = dev->dev_private;
360 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
361 struct nv50_gpuobj_node *node;
362
363 node = gpuobj->node;
364 gpuobj->node = NULL;
365
366 if (node->chan_vma.node) {
367 nouveau_vm_unmap(&node->chan_vma);
368 nouveau_vm_put(&node->chan_vma);
369 }
370 vram->put(dev, &node->vram);
371 kfree(node);
372}
373
374int
375nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
376{
377 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
378 struct nv50_gpuobj_node *node = gpuobj->node;
379 int ret;
380
381 ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
382 NV_MEM_ACCESS_RW, &node->vram->bar_vma);
383 if (ret)
384 return ret;
385
386 nouveau_vm_map(&node->vram->bar_vma, node->vram);
387 gpuobj->pinst = node->vram->bar_vma.offset;
388 return 0;
389}
390
391void
392nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
393{
394 struct nv50_gpuobj_node *node = gpuobj->node;
395
396 if (node->vram->bar_vma.node) {
397 nouveau_vm_unmap(&node->vram->bar_vma);
398 nouveau_vm_put(&node->vram->bar_vma);
399 }
400}
401
402void
403nv50_instmem_flush(struct drm_device *dev)
404{
405 struct drm_nouveau_private *dev_priv = dev->dev_private;
406 unsigned long flags;
407
408 spin_lock_irqsave(&dev_priv->vm_lock, flags);
409 nv_wr32(dev, 0x00330c, 0x00000001);
410 if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
411 NV_ERROR(dev, "PRAMIN flush timeout\n");
412 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
413}
414
415void
416nv84_instmem_flush(struct drm_device *dev)
417{
418 struct drm_nouveau_private *dev_priv = dev->dev_private;
419 unsigned long flags;
420
421 spin_lock_irqsave(&dev_priv->vm_lock, flags);
422 nv_wr32(dev, 0x070000, 0x00000001);
423 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
424 NV_ERROR(dev, "PRAMIN flush timeout\n");
425 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
426}
427
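nv50_instmem_get() above normalises every allocation to whole pages before calling into the VRAM engine: the size is rounded up with (size + 4095) & ~4095 and the alignment clamped to at least 4096. The same arithmetic as a standalone sketch (helper names hypothetical):

	#include <stdint.h>

	/* round a byte count up to a 4 KiB multiple, as above */
	static uint32_t round_up_4k(uint32_t size)
	{
		return (size + 4095u) & ~4095u;
	}

	/* never allow an alignment finer than one page */
	static uint32_t clamp_align_4k(uint32_t align)
	{
		return align < 4096u ? 4096u : align;
	}
	/* e.g. round_up_4k(0x1001) == 0x2000; clamp_align_4k(16) == 4096 */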
diff --git a/drivers/gpu/drm/nouveau/nv50_mc.c b/drivers/gpu/drm/nouveau/nv50_mc.c
deleted file mode 100644
index a739c2afae90..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_mc.c
+++ /dev/null
@@ -1,39 +0,0 @@
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include "nouveau_drv.h"

int
nv50_mc_init(struct drm_device *dev)
{
	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
	return 0;
}

void nv50_mc_takedown(struct drm_device *dev)
{
}
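nv50_mc_init() simply sets every bit of NV03_PMC_ENABLE, switching on all engine units at once, and takedown is a no-op. For context, a hedged sketch of the complementary per-unit operation the same register supports — pulsing one enable bit to reset a single engine; the helper name and usage are illustrative only, not code from this driver:

static void
pmc_unit_reset(struct drm_device *dev, u32 unit_bit)
{
	u32 pmc = nv_rd32(dev, NV03_PMC_ENABLE);

	nv_wr32(dev, NV03_PMC_ENABLE, pmc & ~unit_bit);	/* hold unit in reset */
	nv_wr32(dev, NV03_PMC_ENABLE, pmc | unit_bit);	/* release it again */
}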
diff --git a/drivers/gpu/drm/nouveau/nv50_mpeg.c b/drivers/gpu/drm/nouveau/nv50_mpeg.c
deleted file mode 100644
index e11bb540727b..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_mpeg.c
+++ /dev/null
@@ -1,241 +0,0 @@
/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <drm/drmP.h>
#include "nouveau_drv.h"
#include "nouveau_ramht.h"

struct nv50_mpeg_engine {
	struct nouveau_exec_engine base;
};

static inline u32
CTX_PTR(struct drm_device *dev, u32 offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->chipset == 0x50)
		offset += 0x0260;
	else
		offset += 0x0060;

	return offset;
}

static int
nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	ret = nouveau_gpuobj_new(dev, chan, 128 * 4, 0, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &ctx);
	if (ret)
		return ret;

	nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
	nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->vinst + ctx->size - 1);
	nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->vinst);
	nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
	nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
	nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);

	nv_wo32(ctx, 0x70, 0x00801ec1);
	nv_wo32(ctx, 0x7c, 0x0000037c);
	dev_priv->engine.instmem.flush(dev);

	chan->engctx[engine] = ctx;
	return 0;
}

static void
nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *ctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	int i;

	for (i = 0x00; i <= 0x14; i += 4)
		nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);

	nouveau_gpuobj_ref(NULL, &ctx);
	chan->engctx[engine] = NULL;
}

static int
nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
		     u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;
	obj->engine = 2;
	obj->class = class;

	nv_wo32(obj, 0x00, class);
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	dev_priv->engine.instmem.flush(dev);

	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}

static void
nv50_mpeg_tlb_flush(struct drm_device *dev, int engine)
{
	nv50_vm_flush_engine(dev, 0x08);
}

static int
nv50_mpeg_init(struct drm_device *dev, int engine)
{
	nv_wr32(dev, 0x00b32c, 0x00000000);
	nv_wr32(dev, 0x00b314, 0x00000100);
	nv_wr32(dev, 0x00b0e0, 0x0000001a);

	nv_wr32(dev, 0x00b220, 0x00000044);
	nv_wr32(dev, 0x00b300, 0x00801ec1);
	nv_wr32(dev, 0x00b390, 0x00000000);
	nv_wr32(dev, 0x00b394, 0x00000000);
	nv_wr32(dev, 0x00b398, 0x00000000);
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);

	nv_wr32(dev, 0x00b100, 0xffffffff);
	nv_wr32(dev, 0x00b140, 0xffffffff);

	if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
		return -EBUSY;
	}

	return 0;
}

static int
nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
{
	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
	nv_wr32(dev, 0x00b140, 0x00000000);
	return 0;
}

static void
nv50_mpeg_isr(struct drm_device *dev)
{
	u32 stat = nv_rd32(dev, 0x00b100);
	u32 type = nv_rd32(dev, 0x00b230);
	u32 mthd = nv_rd32(dev, 0x00b234);
	u32 data = nv_rd32(dev, 0x00b238);
	u32 show = stat;

	if (stat & 0x01000000) {
		/* happens on initial binding of the object */
		if (type == 0x00000020 && mthd == 0x0000) {
			nv_wr32(dev, 0x00b308, 0x00000100);
			show &= ~0x01000000;
		}
	}

	if (show && nouveau_ratelimit()) {
		NV_INFO(dev, "PMPEG - 0x%08x 0x%08x 0x%08x 0x%08x\n",
			stat, type, mthd, data);
	}

	nv_wr32(dev, 0x00b100, stat);
	nv_wr32(dev, 0x00b230, 0x00000001);
	nv50_fb_vm_trap(dev, 1);
}

static void
nv50_vpe_isr(struct drm_device *dev)
{
	if (nv_rd32(dev, 0x00b100))
		nv50_mpeg_isr(dev);

	if (nv_rd32(dev, 0x00b800)) {
		u32 stat = nv_rd32(dev, 0x00b800);
		NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
		nv_wr32(dev, 0xb800, stat);
	}
}

static void
nv50_mpeg_destroy(struct drm_device *dev, int engine)
{
	struct nv50_mpeg_engine *pmpeg = nv_engine(dev, engine);

	nouveau_irq_unregister(dev, 0);

	NVOBJ_ENGINE_DEL(dev, MPEG);
	kfree(pmpeg);
}

int
nv50_mpeg_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_mpeg_engine *pmpeg;

	pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
	if (!pmpeg)
		return -ENOMEM;

	pmpeg->base.destroy = nv50_mpeg_destroy;
	pmpeg->base.init = nv50_mpeg_init;
	pmpeg->base.fini = nv50_mpeg_fini;
	pmpeg->base.context_new = nv50_mpeg_context_new;
	pmpeg->base.context_del = nv50_mpeg_context_del;
	pmpeg->base.object_new = nv50_mpeg_object_new;
	pmpeg->base.tlb_flush = nv50_mpeg_tlb_flush;

	if (dev_priv->chipset == 0x50) {
		nouveau_irq_register(dev, 0, nv50_vpe_isr);
		NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
		NVOBJ_CLASS(dev, 0x3174, MPEG);
#if 0
		NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
		NVOBJ_CLASS(dev, 0x4075, ME);
#endif
	} else {
		nouveau_irq_register(dev, 0, nv50_mpeg_isr);
		NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
		NVOBJ_CLASS(dev, 0x8274, MPEG);
	}

	return 0;

}
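nv50_mpeg_create() wires the engine into the driver purely through the nouveau_exec_engine vtable plus the NVOBJ_ENGINE_ADD/NVOBJ_CLASS tables; nothing else references the functions above directly. A minimal sketch of how such a vtable is consumed by the core during card init (the loop mirrors the driver's engine-init walk; treat it as illustrative rather than the exact core code):

static int
engines_init_all(struct drm_device *dev)
{
	int e, ret;

	for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
		struct nouveau_exec_engine *eng = nv_engine(dev, e);

		if (!eng)
			continue;
		ret = eng->init(dev, e);	/* e.g. nv50_mpeg_init() */
		if (ret)
			return ret;
	}
	return 0;
}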
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 07593fd73af3..c4a65039b1ca 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -23,13 +23,19 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_bios.h"
 #include "nouveau_hw.h"
 #include "nouveau_pm.h"
 #include "nouveau_hwsq.h"
+
 #include "nv50_display.h"
 
+#include <subdev/bios/pll.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
 enum clk_src {
 	clk_src_crystal,
 	clk_src_href,
@@ -49,19 +55,20 @@ static u32 read_clk(struct drm_device *, enum clk_src);
 static u32
 read_div(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 
-	switch (dev_priv->chipset) {
+	switch (nv_device(drm->device)->chipset) {
 	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
 	case 0x84:
 	case 0x86:
 	case 0x98:
 	case 0xa0:
-		return nv_rd32(dev, 0x004700);
+		return nv_rd32(device, 0x004700);
 	case 0x92:
 	case 0x94:
 	case 0x96:
-		return nv_rd32(dev, 0x004800);
+		return nv_rd32(device, 0x004800);
 	default:
 		return 0x00000000;
 	}
@@ -70,12 +77,13 @@ read_div(struct drm_device *dev)
 static u32
 read_pll_src(struct drm_device *dev, u32 base)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u32 coef, ref = read_clk(dev, clk_src_crystal);
-	u32 rsel = nv_rd32(dev, 0x00e18c);
+	u32 rsel = nv_rd32(device, 0x00e18c);
 	int P, N, M, id;
 
-	switch (dev_priv->chipset) {
+	switch (nv_device(drm->device)->chipset) {
 	case 0x50:
 	case 0xa0:
 		switch (base) {
@@ -84,11 +92,11 @@ read_pll_src(struct drm_device *dev, u32 base)
 		case 0x4008: id = !!(rsel & 0x00000008); break;
 		case 0x4030: id = 0; break;
 		default:
-			NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
+			NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
 			return 0;
 		}
 
-		coef = nv_rd32(dev, 0x00e81c + (id * 0x0c));
+		coef = nv_rd32(device, 0x00e81c + (id * 0x0c));
 		ref *= (coef & 0x01000000) ? 2 : 4;
 		P = (coef & 0x00070000) >> 16;
 		N = ((coef & 0x0000ff00) >> 8) + 1;
@@ -97,7 +105,7 @@ read_pll_src(struct drm_device *dev, u32 base)
 	case 0x84:
 	case 0x86:
 	case 0x92:
-		coef = nv_rd32(dev, 0x00e81c);
+		coef = nv_rd32(device, 0x00e81c);
 		P = (coef & 0x00070000) >> 16;
 		N = (coef & 0x0000ff00) >> 8;
 		M = (coef & 0x000000ff) >> 0;
@@ -105,14 +113,14 @@ read_pll_src(struct drm_device *dev, u32 base)
 	case 0x94:
 	case 0x96:
 	case 0x98:
-		rsel = nv_rd32(dev, 0x00c050);
+		rsel = nv_rd32(device, 0x00c050);
 		switch (base) {
 		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
 		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
 		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
 		case 0x4030: rsel = 3; break;
 		default:
-			NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
+			NV_ERROR(drm, "ref: bad pll 0x%06x\n", base);
 			return 0;
 		}
 
@@ -123,8 +131,8 @@ read_pll_src(struct drm_device *dev, u32 base)
 		case 3: id = 0; break;
 		}
 
-		coef = nv_rd32(dev, 0x00e81c + (id * 0x28));
-		P = (nv_rd32(dev, 0x00e824 + (id * 0x28)) >> 16) & 7;
+		coef = nv_rd32(device, 0x00e81c + (id * 0x28));
+		P = (nv_rd32(device, 0x00e824 + (id * 0x28)) >> 16) & 7;
 		P += (coef & 0x00070000) >> 16;
 		N = (coef & 0x0000ff00) >> 8;
 		M = (coef & 0x000000ff) >> 0;
@@ -141,7 +149,9 @@ read_pll_src(struct drm_device *dev, u32 base)
 static u32
 read_pll_ref(struct drm_device *dev, u32 base)
 {
-	u32 src, mast = nv_rd32(dev, 0x00c040);
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u32 src, mast = nv_rd32(device, 0x00c040);
 
 	switch (base) {
 	case 0x004028:
@@ -159,7 +169,7 @@ read_pll_ref(struct drm_device *dev, u32 base)
 	case 0x00e810:
 		return read_clk(dev, clk_src_crystal);
 	default:
-		NV_ERROR(dev, "bad pll 0x%06x\n", base);
+		NV_ERROR(drm, "bad pll 0x%06x\n", base);
 		return 0;
 	}
 
@@ -171,17 +181,18 @@ read_pll_ref(struct drm_device *dev, u32 base)
 static u32
 read_pll(struct drm_device *dev, u32 base)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 mast = nv_rd32(dev, 0x00c040);
-	u32 ctrl = nv_rd32(dev, base + 0);
-	u32 coef = nv_rd32(dev, base + 4);
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u32 mast = nv_rd32(device, 0x00c040);
+	u32 ctrl = nv_rd32(device, base + 0);
+	u32 coef = nv_rd32(device, base + 4);
 	u32 ref = read_pll_ref(dev, base);
 	u32 clk = 0;
 	int N1, N2, M1, M2;
 
 	if (base == 0x004028 && (mast & 0x00100000)) {
 		/* wtf, appears to only disable post-divider on nva0 */
-		if (dev_priv->chipset != 0xa0)
+		if (nv_device(drm->device)->chipset != 0xa0)
 			return read_clk(dev, clk_src_dom6);
 	}
 
@@ -205,13 +216,14 @@ read_pll(struct drm_device *dev, u32 base)
 static u32
 read_clk(struct drm_device *dev, enum clk_src src)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 mast = nv_rd32(dev, 0x00c040);
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	u32 mast = nv_rd32(device, 0x00c040);
 	u32 P = 0;
 
 	switch (src) {
 	case clk_src_crystal:
-		return dev_priv->crystal;
+		return device->crystal;
 	case clk_src_href:
 		return 100000; /* PCIE reference clock */
 	case clk_src_hclk:
@@ -230,7 +242,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
 		break;
 	case clk_src_nvclk:
 		if (!(mast & 0x00100000))
-			P = (nv_rd32(dev, 0x004028) & 0x00070000) >> 16;
+			P = (nv_rd32(device, 0x004028) & 0x00070000) >> 16;
 		switch (mast & 0x00000003) {
 		case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
 		case 0x00000001: return read_clk(dev, clk_src_dom6);
@@ -239,7 +251,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
 		}
 		break;
 	case clk_src_sclk:
-		P = (nv_rd32(dev, 0x004020) & 0x00070000) >> 16;
+		P = (nv_rd32(device, 0x004020) & 0x00070000) >> 16;
 		switch (mast & 0x00000030) {
 		case 0x00000000:
 			if (mast & 0x00000080)
@@ -251,8 +263,8 @@ read_clk(struct drm_device *dev, enum clk_src src)
 		}
 		break;
 	case clk_src_mclk:
-		P = (nv_rd32(dev, 0x004008) & 0x00070000) >> 16;
-		if (nv_rd32(dev, 0x004008) & 0x00000200) {
+		P = (nv_rd32(device, 0x004008) & 0x00070000) >> 16;
+		if (nv_rd32(device, 0x004008) & 0x00000200) {
 			switch (mast & 0x0000c000) {
 			case 0x00000000:
 				return read_clk(dev, clk_src_crystal) >> P;
@@ -266,7 +278,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
 		break;
 	case clk_src_vdec:
 		P = (read_div(dev) & 0x00000700) >> 8;
-		switch (dev_priv->chipset) {
+		switch (nv_device(drm->device)->chipset) {
 		case 0x84:
 		case 0x86:
 		case 0x92:
@@ -275,7 +287,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
 		case 0xa0:
 			switch (mast & 0x00000c00) {
 			case 0x00000000:
-				if (dev_priv->chipset == 0xa0) /* wtf?? */
+				if (nv_device(drm->device)->chipset == 0xa0) /* wtf?? */
 					return read_clk(dev, clk_src_nvclk) >> P;
 				return read_clk(dev, clk_src_crystal) >> P;
 			case 0x00000400:
@@ -303,7 +315,7 @@ read_clk(struct drm_device *dev, enum clk_src src)
 		}
 		break;
 	case clk_src_dom6:
-		switch (dev_priv->chipset) {
+		switch (nv_device(drm->device)->chipset) {
 		case 0x50:
 		case 0xa0:
 			return read_pll(dev, 0x00e810) >> 2;
@@ -329,22 +341,22 @@ read_clk(struct drm_device *dev, enum clk_src src)
 		break;
 	}
 
-	NV_DEBUG(dev, "unknown clock source %d 0x%08x\n", src, mast);
+	NV_DEBUG(drm, "unknown clock source %d 0x%08x\n", src, mast);
 	return 0;
 }
 
 int
 nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	if (dev_priv->chipset == 0xaa ||
-	    dev_priv->chipset == 0xac)
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	if (nv_device(drm->device)->chipset == 0xaa ||
+	    nv_device(drm->device)->chipset == 0xac)
 		return 0;
 
 	perflvl->core = read_clk(dev, clk_src_nvclk);
 	perflvl->shader = read_clk(dev, clk_src_sclk);
 	perflvl->memory = read_clk(dev, clk_src_mclk);
-	if (dev_priv->chipset != 0x50) {
+	if (nv_device(drm->device)->chipset != 0x50) {
 		perflvl->vdec = read_clk(dev, clk_src_vdec);
 		perflvl->dom6 = read_clk(dev, clk_src_dom6);
 	}
@@ -363,22 +375,25 @@ struct nv50_pm_state {
 };
 
 static u32
-calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
+calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
 	 u32 clk, int *N1, int *M1, int *log2P)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nouveau_clock *pclk = nouveau_clock(device);
 	struct nouveau_pll_vals coef;
 	int ret;
 
-	ret = get_pll_limits(dev, reg, pll);
+	ret = nvbios_pll_parse(bios, reg, pll);
 	if (ret)
 		return 0;
 
-	pll->vco2.maxfreq = 0;
+	pll->vco2.max_freq = 0;
 	pll->refclk = read_pll_ref(dev, reg);
 	if (!pll->refclk)
 		return 0;
 
-	ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
+	ret = pclk->pll_calc(pclk, pll, clk, &coef);
 	if (ret == 0)
 		return 0;
 
@@ -461,27 +476,29 @@ mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
 static u32
 mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
 {
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	if (mr <= 1)
-		return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4));
+		return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
 	if (mr <= 3)
-		return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4));
+		return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
 	return 0;
 }
 
 static void
 mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
 {
-	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
 	struct nv50_pm_state *info = exec->priv;
 	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
 
 	if (mr <= 1) {
-		if (dev_priv->vram_rank_B)
+		if (pfb->ram.ranks > 1)
 			hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data);
 		hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data);
 	} else
 	if (mr <= 3) {
-		if (dev_priv->vram_rank_B)
+		if (pfb->ram.ranks > 1)
 			hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data);
 		hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data);
 	}
@@ -490,11 +507,12 @@ mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
 static void
 mclk_clock_set(struct nouveau_mem_exec_func *exec)
 {
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	struct nv50_pm_state *info = exec->priv;
 	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-	u32 ctrl = nv_rd32(exec->dev, 0x004008);
+	u32 ctrl = nv_rd32(device, 0x004008);
 
-	info->mmast = nv_rd32(exec->dev, 0x00c040);
+	info->mmast = nv_rd32(device, 0x00c040);
 	info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */
 	info->mmast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
 
@@ -508,7 +526,7 @@ mclk_clock_set(struct nouveau_mem_exec_func *exec)
 static void
 mclk_timing_set(struct nouveau_mem_exec_func *exec)
 {
-	struct drm_device *dev = exec->dev;
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	struct nv50_pm_state *info = exec->priv;
 	struct nouveau_pm_level *perflvl = info->perflvl;
 	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
@@ -516,7 +534,7 @@ mclk_timing_set(struct nouveau_mem_exec_func *exec)
 
 	for (i = 0; i < 9; i++) {
 		u32 reg = 0x100220 + (i * 4);
-		u32 val = nv_rd32(dev, reg);
+		u32 val = nv_rd32(device, reg);
 		if (val != perflvl->timing.reg[i])
 			hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]);
 	}
@@ -526,7 +544,8 @@ static int
 calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 	  struct nv50_pm_state *info)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nouveau_dev(dev);
 	u32 crtc_mask = nv50_display_active_crtcs(dev);
 	struct nouveau_mem_exec_func exec = {
 		.dev = dev,
@@ -542,22 +561,22 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 		.priv = info
 	};
 	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
-	struct pll_lims pll;
+	struct nvbios_pll pll;
 	int N, M, P;
 	int ret;
 
 	/* use pcie refclock if possible, otherwise use mpll */
-	info->mctrl = nv_rd32(dev, 0x004008);
+	info->mctrl = nv_rd32(device, 0x004008);
 	info->mctrl &= ~0x81ff0200;
 	if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) {
-		info->mctrl |= 0x00000200 | (pll.log2p_bias << 19);
+		info->mctrl |= 0x00000200 | (pll.bias_p << 19);
 	} else {
 		ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P);
 		if (ret == 0)
 			return -EINVAL;
 
 		info->mctrl |= 0x80000000 | (P << 22) | (P << 16);
-		info->mctrl |= pll.log2p_bias << 19;
+		info->mctrl |= pll.bias_p << 19;
 		info->mcoef = (N << 8) | M;
 	}
 
@@ -567,7 +586,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 		hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
 		hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
 	}
-	if (dev_priv->chipset >= 0x92)
+	if (nv_device(drm->device)->chipset >= 0x92)
 		hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
 	hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
 	hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */
@@ -578,7 +597,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 
 	hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
 	hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */
-	if (dev_priv->chipset >= 0x92)
+	if (nv_device(drm->device)->chipset >= 0x92)
 		hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
 	hwsq_fini(hwsq);
 	return 0;
@@ -587,16 +606,17 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
 void *
 nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv50_pm_state *info;
 	struct hwsq_ucode *hwsq;
-	struct pll_lims pll;
+	struct nvbios_pll pll;
 	u32 out, mast, divs, ctrl;
 	int clk, ret = -EINVAL;
 	int N, M, P1, P2;
 
-	if (dev_priv->chipset == 0xaa ||
-	    dev_priv->chipset == 0xac)
+	if (nv_device(drm->device)->chipset == 0xaa ||
+	    nv_device(drm->device)->chipset == 0xac)
 		return ERR_PTR(-ENODEV);
 
 	info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -645,7 +665,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	clk = calc_div(perflvl->core, perflvl->vdec, &P1);
 
 	/* see how close we can get using xpll/hclk as a source */
-	if (dev_priv->chipset != 0x98)
+	if (nv_device(drm->device)->chipset != 0x98)
 		out = read_pll(dev, 0x004030);
 	else
 		out = read_clk(dev, clk_src_hclkm3d2);
@@ -654,7 +674,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	/* select whichever gets us closest */
 	if (abs((int)perflvl->vdec - clk) <=
 	    abs((int)perflvl->vdec - out)) {
-		if (dev_priv->chipset != 0x98)
+		if (nv_device(drm->device)->chipset != 0x98)
 			mast |= 0x00000c00;
 		divs |= P1 << 8;
 	} else {
@@ -682,7 +702,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	}
 
 	/* vdec/dom6: complete switch to new clocks */
-	switch (dev_priv->chipset) {
+	switch (nv_device(drm->device)->chipset) {
 	case 0x92:
 	case 0x94:
 	case 0x96:
@@ -698,7 +718,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	/* core/shader: make sure sclk/nvclk are disconnected from their
 	 * PLLs (nvclk to dom6, sclk to hclk)
 	 */
-	if (dev_priv->chipset < 0x92)
+	if (nv_device(drm->device)->chipset < 0x92)
 		mast = (mast & ~0x001000b0) | 0x00100080;
 	else
 		mast = (mast & ~0x000000b3) | 0x00000081;
@@ -710,7 +730,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	if (clk == 0)
 		goto error;
 
-	ctrl = nv_rd32(dev, 0x004028) & ~0xc03f0100;
+	ctrl = nv_rd32(device, 0x004028) & ~0xc03f0100;
 	mast &= ~0x00100000;
 	mast |= 3;
 
@@ -723,7 +743,7 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	 * cases will be handled by tying to nvclk, but it's possible there's
 	 * corners
 	 */
-	ctrl = nv_rd32(dev, 0x004020) & ~0xc03f0100;
+	ctrl = nv_rd32(device, 0x004020) & ~0xc03f0100;
 
 	if (P1-- && perflvl->shader == (perflvl->core << 1)) {
 		hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
@@ -752,11 +772,12 @@ error:
 static int
 prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u32 hwsq_data, hwsq_kick;
 	int i;
 
-	if (dev_priv->chipset < 0x94) {
+	if (nv_device(drm->device)->chipset < 0x94) {
 		hwsq_data = 0x001400;
 		hwsq_kick = 0x00000003;
 	} else {
@@ -764,22 +785,22 @@ prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
 		hwsq_kick = 0x00000001;
 	}
 	/* upload hwsq ucode */
-	nv_mask(dev, 0x001098, 0x00000008, 0x00000000);
-	nv_wr32(dev, 0x001304, 0x00000000);
-	if (dev_priv->chipset >= 0x92)
-		nv_wr32(dev, 0x001318, 0x00000000);
+	nv_mask(device, 0x001098, 0x00000008, 0x00000000);
+	nv_wr32(device, 0x001304, 0x00000000);
+	if (nv_device(drm->device)->chipset >= 0x92)
+		nv_wr32(device, 0x001318, 0x00000000);
 	for (i = 0; i < hwsq->len / 4; i++)
-		nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
-	nv_mask(dev, 0x001098, 0x00000018, 0x00000018);
+		nv_wr32(device, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
+	nv_mask(device, 0x001098, 0x00000018, 0x00000018);
 
 	/* launch, and wait for completion */
-	nv_wr32(dev, 0x00130c, hwsq_kick);
-	if (!nv_wait(dev, 0x001308, 0x00000100, 0x00000000)) {
-		NV_ERROR(dev, "hwsq ucode exec timed out\n");
-		NV_ERROR(dev, "0x001308: 0x%08x\n", nv_rd32(dev, 0x001308));
+	nv_wr32(device, 0x00130c, hwsq_kick);
+	if (!nv_wait(device, 0x001308, 0x00000100, 0x00000000)) {
+		NV_ERROR(drm, "hwsq ucode exec timed out\n");
+		NV_ERROR(drm, "0x001308: 0x%08x\n", nv_rd32(device, 0x001308));
 		for (i = 0; i < hwsq->len / 4; i++) {
-			NV_ERROR(dev, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
-				 nv_rd32(dev, 0x001400 + (i * 4)));
+			NV_ERROR(drm, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
+				 nv_rd32(device, 0x001400 + (i * 4)));
 		}
 
 		return -EIO;
@@ -791,20 +812,22 @@ prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
 int
 nv50_pm_clocks_set(struct drm_device *dev, void *data)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nv50_pm_state *info = data;
 	struct bit_entry M;
 	int ret = -EBUSY;
 
 	/* halt and idle execution engines */
-	nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
-	if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010))
+	nv_mask(device, 0x002504, 0x00000001, 0x00000001);
+	if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010))
 		goto resume;
-	if (!nv_wait(dev, 0x00251c, 0x0000003f, 0x0000003f))
+	if (!nv_wait(device, 0x00251c, 0x0000003f, 0x0000003f))
 		goto resume;
 
 	/* program memory clock, if necessary - must come before engine clock
 	 * reprogramming due to how we construct the hwsq scripts in pre()
 	 */
+#define nouveau_bios_init_exec(a,b) nouveau_bios_run_init_table((a), (b), NULL, 0)
 	if (info->mclk_hwsq.len) {
 		/* execute some scripts that do ??? from the vbios.. */
 		if (!bit_table(dev, 'M', &M) && M.version == 1) {
@@ -826,61 +849,7 @@ nv50_pm_clocks_set(struct drm_device *dev, void *data)
 	ret = prog_hwsq(dev, &info->eclk_hwsq);
 
 resume:
-	nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
+	nv_mask(device, 0x002504, 0x00000001, 0x00000000);
 	kfree(info);
 	return ret;
 }
-
-static int
-pwm_info(struct drm_device *dev, int *line, int *ctrl, int *indx)
-{
-	if (*line == 0x04) {
-		*ctrl = 0x00e100;
-		*line = 4;
-		*indx = 0;
-	} else
-	if (*line == 0x09) {
-		*ctrl = 0x00e100;
-		*line = 9;
-		*indx = 1;
-	} else
-	if (*line == 0x10) {
-		*ctrl = 0x00e28c;
-		*line = 0;
-		*indx = 0;
-	} else {
-		NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", *line);
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-int
-nv50_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
-{
-	int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
-	if (ret)
-		return ret;
-
-	if (nv_rd32(dev, ctrl) & (1 << line)) {
-		*divs = nv_rd32(dev, 0x00e114 + (id * 8));
-		*duty = nv_rd32(dev, 0x00e118 + (id * 8));
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-int
-nv50_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
-{
-	int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
-	if (ret)
-		return ret;
-
-	nv_mask(dev, ctrl, 0x00010001 << line, 0x00000001 << line);
-	nv_wr32(dev, 0x00e114 + (id * 8), divs);
-	nv_wr32(dev, 0x00e118 + (id * 8), duty | 0x80000000);
-	return 0;
-}
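Every hunk in the nv50_pm.c diff above applies one mechanical conversion: chipset checks, register accessors and log macros move from the old drm_nouveau_private layer onto the split nouveau_drm/nouveau_device objects. Distilled into a before/after sketch (the function itself is hypothetical; the accessor calls are exactly the ones the diff introduces):

static u32
read_some_clock(struct drm_device *dev)
{
	/* old style, as removed by the diff:
	 *	struct drm_nouveau_private *dev_priv = dev->dev_private;
	 *	if (dev_priv->chipset < 0x92) ...
	 *	return nv_rd32(dev, 0x004700);
	 */
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (nv_device(drm->device)->chipset < 0x92)	/* chipset via drm */
		NV_DEBUG(drm, "pre-nv92 divider layout\n");
	return nv_rd32(device, 0x004700);		/* reads via device */
}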
diff --git a/drivers/gpu/drm/nouveau/nv50_software.c b/drivers/gpu/drm/nouveau/nv50_software.c
deleted file mode 100644
index 5497a6ce25b4..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_software.c
+++ /dev/null
@@ -1,203 +0,0 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <drm/drmP.h>

#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_software.h"

#include "nv50_display.h"

struct nv50_software_priv {
	struct nouveau_software_priv base;
};

struct nv50_software_chan {
	struct nouveau_software_chan base;
};

static int
mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
	struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
	struct nouveau_gpuobj *gpuobj;

	gpuobj = nouveau_ramht_find(chan, data);
	if (!gpuobj)
		return -ENOENT;

	pch->base.vblank.ctxdma = gpuobj->cinst >> 4;
	return 0;
}

static int
mthd_vblsem_offset(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
	struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
	pch->base.vblank.offset = data;
	return 0;
}

static int
mthd_vblsem_value(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
	struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
	pch->base.vblank.value = data;
	return 0;
}

static int
mthd_vblsem_release(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
	struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
	struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
	struct drm_device *dev = chan->dev;

	if (data > 1)
		return -EINVAL;

	drm_vblank_get(dev, data);

	pch->base.vblank.head = data;
	list_add(&pch->base.vblank.list, &psw->base.vblank);
	return 0;
}

static int
mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
	nouveau_finish_page_flip(chan, NULL);
	return 0;
}

static int
nv50_software_context_new(struct nouveau_channel *chan, int engine)
{
	struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
	struct nv50_display *pdisp = nv50_display(chan->dev);
	struct nv50_software_chan *pch;
	int ret = 0, i;

	pch = kzalloc(sizeof(*pch), GFP_KERNEL);
	if (!pch)
		return -ENOMEM;

	nouveau_software_context_new(&pch->base);
	pch->base.vblank.channel = chan->ramin->vinst >> 12;
	chan->engctx[engine] = pch;

	/* dma objects for display sync channel semaphore blocks */
	for (i = 0; i < chan->dev->mode_config.num_crtc; i++) {
		struct nv50_display_crtc *dispc = &pdisp->crtc[i];
		struct nouveau_gpuobj *obj = NULL;

		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     dispc->sem.bo->bo.offset, 0x1000,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &obj);
		if (ret)
			break;

		ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, obj);
		nouveau_gpuobj_ref(NULL, &obj);
	}

	if (ret)
		psw->base.base.context_del(chan, engine);
	return ret;
}

static void
nv50_software_context_del(struct nouveau_channel *chan, int engine)
{
	struct nv50_software_chan *pch = chan->engctx[engine];
	chan->engctx[engine] = NULL;
	kfree(pch);
}

static int
nv50_software_object_new(struct nouveau_channel *chan, int engine,
			 u32 handle, u16 class)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
	if (ret)
		return ret;
	obj->engine = 0;
	obj->class = class;

	ret = nouveau_ramht_insert(chan, handle, obj);
	nouveau_gpuobj_ref(NULL, &obj);
	return ret;
}

static int
nv50_software_init(struct drm_device *dev, int engine)
{
	return 0;
}

static int
nv50_software_fini(struct drm_device *dev, int engine, bool suspend)
{
	return 0;
}

static void
nv50_software_destroy(struct drm_device *dev, int engine)
{
	struct nv50_software_priv *psw = nv_engine(dev, engine);

	NVOBJ_ENGINE_DEL(dev, SW);
	kfree(psw);
}

int
nv50_software_create(struct drm_device *dev)
{
	struct nv50_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
	if (!psw)
		return -ENOMEM;

	psw->base.base.destroy = nv50_software_destroy;
	psw->base.base.init = nv50_software_init;
	psw->base.base.fini = nv50_software_fini;
	psw->base.base.context_new = nv50_software_context_new;
	psw->base.base.context_del = nv50_software_context_del;
	psw->base.base.object_new = nv50_software_object_new;
	nouveau_software_create(&psw->base);

	NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
	NVOBJ_CLASS(dev, 0x506e, SW);
	NVOBJ_MTHD (dev, 0x506e, 0x018c, mthd_dma_vblsem);
	NVOBJ_MTHD (dev, 0x506e, 0x0400, mthd_vblsem_offset);
	NVOBJ_MTHD (dev, 0x506e, 0x0404, mthd_vblsem_value);
	NVOBJ_MTHD (dev, 0x506e, 0x0408, mthd_vblsem_release);
	NVOBJ_MTHD (dev, 0x506e, 0x0500, mthd_flip);
	return 0;
}
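The NVOBJ_MTHD table at the end binds software-class 0x506e methods to the handlers above: 0x018c names the semaphore ctxdma, 0x0400/0x0404 give its offset and release value, and 0x0408 picks the CRTC head and queues the release. A sketch of the method stream a channel would push to arm one vblank release (function name and subchannel choice are illustrative; the channel is assumed to already have the 0x506e object bound):

static int
queue_vblank_release(struct nouveau_channel *chan, u32 sema_handle,
		     u32 offset, u32 value, u32 head)
{
	int ret = RING_SPACE(chan, 6);
	if (ret)
		return ret;

	BEGIN_NV04(chan, 0, 0x018c, 1);	/* mthd_dma_vblsem */
	OUT_RING  (chan, sema_handle);	/* ctxdma looked up via RAMHT */
	BEGIN_NV04(chan, 0, 0x0400, 3);	/* offset, value, release */
	OUT_RING  (chan, offset);	/* mthd_vblsem_offset */
	OUT_RING  (chan, value);	/* mthd_vblsem_value */
	OUT_RING  (chan, head);		/* mthd_vblsem_release, head 0 or 1 */
	FIRE_RING (chan);
	return 0;
}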
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 63ece8503a11..b562b59e1326 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -29,35 +29,40 @@
 
 #define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
 #include "nouveau_reg.h"
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 #include "nouveau_crtc.h"
 #include "nv50_display.h"
 
+#include <subdev/timer.h>
+
 static u32
-nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane)
+nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
 	static const u8 nv50[] = { 16, 8, 0, 24 };
-	if (dev_priv->chipset == 0xaf)
+	if (nv_device(drm->device)->chipset == 0xaf)
 		return nvaf[lane];
 	return nv50[lane];
 }
 
 static void
-nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern)
+nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-	nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
+	nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
 }
 
 static void
-nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
+nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
 		      u8 lane, u8 swing, u8 preem)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
 	u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
 	u32 mask = 0x000000ff << shift;
@@ -65,7 +70,7 @@ nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
 
 	table = nouveau_dp_bios_data(dev, dcb, &entry);
 	if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
-		NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
+		NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
 		return;
 	}
 
@@ -76,24 +81,26 @@ nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
 		return;
 	}
 
-	nv_mask(dev, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
-	nv_mask(dev, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
-	nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
+	nv_mask(device, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
+	nv_mask(device, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
+	nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
 }
 
 static void
-nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
+nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
 		     int link_nr, u32 link_bw, bool enhframe)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
-	u32 dpctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
-	u32 clksor = nv_rd32(dev, 0x614300 + (or * 0x800)) & ~0x000c0000;
+	u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
+	u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)) & ~0x000c0000;
 	u8 *table, *entry, mask;
 	int i;
 
 	table = nouveau_dp_bios_data(dev, dcb, &entry);
 	if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
-		NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
+		NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
 		return;
 	}
 
@@ -112,20 +119,21 @@ nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
 	if (link_bw > 162000)
 		clksor |= 0x00040000;
 
-	nv_wr32(dev, 0x614300 + (or * 0x800), clksor);
-	nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), dpctrl);
+	nv_wr32(device, 0x614300 + (or * 0x800), clksor);
+	nv_wr32(device, NV50_SOR_DP_CTRL(or, link), dpctrl);
 
 	mask = 0;
 	for (i = 0; i < link_nr; i++)
 		mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
-	nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
+	nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
 }
 
 static void
 nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
 {
-	u32 dpctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
-	u32 clksor = nv_rd32(dev, 0x614300 + (or * 0x800));
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
+	u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800));
 	if (clksor & 0x000c0000)
 		*bw = 270000;
 	else
@@ -139,6 +147,8 @@ nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
 void
 nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	const u32 symbol = 100000;
 	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
 	int TU, VTUi, VTUf, VTUa;
@@ -206,7 +216,7 @@ nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
 	}
 
 	if (!bestTU) {
-		NV_ERROR(dev, "DP: unable to find suitable config\n");
+		NV_ERROR(drm, "DP: unable to find suitable config\n");
 		return;
 	}
 
@@ -217,8 +227,8 @@ nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
 	r = do_div(unk, symbol);
 	unk += 6;
 
-	nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
-	nv_mask(dev, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
+	nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
+	nv_mask(device, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
 								bestVTUf << 16 |
 								bestVTUi << 8 |
 								unk);
@@ -227,6 +237,7 @@ static void
 nv50_sor_disconnect(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct drm_device *dev = encoder->dev;
 	struct nouveau_channel *evo = nv50_display(dev)->master;
 	int ret;
@@ -235,11 +246,11 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
 		return;
 	nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
 
-	NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or);
+	NV_DEBUG(drm, "Disconnecting SOR %d\n", nv_encoder->or);
 
 	ret = RING_SPACE(evo, 4);
 	if (ret) {
-		NV_ERROR(dev, "no space while disconnecting SOR\n");
+		NV_ERROR(drm, "no space while disconnecting SOR\n");
 		return;
 	}
 	BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
@@ -256,22 +267,24 @@ static void
 static void
 nv50_sor_dpms(struct drm_encoder *encoder, int mode)
 {
+	struct nouveau_device *device = nouveau_dev(encoder->dev);
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct drm_device *dev = encoder->dev;
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_encoder *enc;
 	uint32_t val;
 	int or = nv_encoder->or;
 
-	NV_DEBUG_KMS(dev, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
+	NV_DEBUG(drm, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
 
 	nv_encoder->last_dpms = mode;
 	list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
 		struct nouveau_encoder *nvenc = nouveau_encoder(enc);
 
 		if (nvenc == nv_encoder ||
-		    (nvenc->dcb->type != OUTPUT_TMDS &&
-		     nvenc->dcb->type != OUTPUT_LVDS &&
-		     nvenc->dcb->type != OUTPUT_DP) ||
+		    (nvenc->dcb->type != DCB_OUTPUT_TMDS &&
+		     nvenc->dcb->type != DCB_OUTPUT_LVDS &&
+		     nvenc->dcb->type != DCB_OUTPUT_DP) ||
 		    nvenc->dcb->or != nv_encoder->dcb->or)
 			continue;
 
@@ -280,30 +293,30 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
 	}
 
 	/* wait for it to be done */
-	if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
+	if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
 		     NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
-		NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
-		NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
-			 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
+		NV_ERROR(drm, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
+		NV_ERROR(drm, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
+			 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
 	}
 
-	val = nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
+	val = nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
 
 	if (mode == DRM_MODE_DPMS_ON)
 		val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
 	else
 		val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
 
-	nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
+	nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
 		NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
-	if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or),
+	if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(or),
 		     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
-		NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
-		NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
-			 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
+		NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
+		NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
+			 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
 	}
 
-	if (nv_encoder->dcb->type == OUTPUT_DP) {
+	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
 		struct dp_train_func func = {
 			.link_set = nv50_sor_dp_link_set,
 			.train_set = nv50_sor_dp_train_set,
@@ -317,13 +330,15 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
 static void
 nv50_sor_save(struct drm_encoder *encoder)
 {
-	NV_ERROR(encoder->dev, "!!\n");
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+	NV_ERROR(drm, "!!\n");
 }
 
 static void
 nv50_sor_restore(struct drm_encoder *encoder)
 {
-	NV_ERROR(encoder->dev, "!!\n");
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
+	NV_ERROR(drm, "!!\n");
 }
 
 static bool
@@ -331,14 +346,15 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder,
 		    const struct drm_display_mode *mode,
 		    struct drm_display_mode *adjusted_mode)
 {
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_connector *connector;
 
-	NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
+	NV_DEBUG(drm, "or %d\n", nv_encoder->or);
 
 	connector = nouveau_encoder_connector_get(nv_encoder);
 	if (!connector) {
-		NV_ERROR(encoder->dev, "Encoder has no connector\n");
+		NV_ERROR(drm, "Encoder has no connector\n");
 		return false;
 	}
 
@@ -354,7 +370,7 @@ nv50_sor_prepare(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	nv50_sor_disconnect(encoder);
-	if (nv_encoder->dcb->type == OUTPUT_DP) {
+	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
 		/* avoid race between link training and supervisor intr */
 		nv50_display_sync(encoder->dev);
 	}
@@ -371,18 +387,18 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 	struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
+	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
 	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
 	struct nouveau_connector *nv_connector;
 	uint32_t mode_ctl = 0;
 	int ret;
 
-	NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
+	NV_DEBUG(drm, "or %d type %d -> crtc %d\n",
 		     nv_encoder->or, nv_encoder->dcb->type, crtc->index);
 	nv_encoder->crtc = encoder->crtc;
383 399
384 switch (nv_encoder->dcb->type) { 400 switch (nv_encoder->dcb->type) {
385 case OUTPUT_TMDS: 401 case DCB_OUTPUT_TMDS:
386 if (nv_encoder->dcb->sorconf.link & 1) { 402 if (nv_encoder->dcb->sorconf.link & 1) {
387 if (mode->clock < 165000) 403 if (mode->clock < 165000)
388 mode_ctl = 0x0100; 404 mode_ctl = 0x0100;
@@ -393,7 +409,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
393 409
394 nouveau_hdmi_mode_set(encoder, mode); 410 nouveau_hdmi_mode_set(encoder, mode);
395 break; 411 break;
396 case OUTPUT_DP: 412 case DCB_OUTPUT_DP:
397 nv_connector = nouveau_encoder_connector_get(nv_encoder); 413 nv_connector = nouveau_encoder_connector_get(nv_encoder);
398 if (nv_connector && nv_connector->base.display_info.bpc == 6) { 414 if (nv_connector && nv_connector->base.display_info.bpc == 6) {
399 nv_encoder->dp.datarate = mode->clock * 18 / 8; 415 nv_encoder->dp.datarate = mode->clock * 18 / 8;
@@ -427,7 +443,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
427 443
428 ret = RING_SPACE(evo, 2); 444 ret = RING_SPACE(evo, 2);
429 if (ret) { 445 if (ret) {
430 NV_ERROR(dev, "no space while connecting SOR\n"); 446 NV_ERROR(drm, "no space while connecting SOR\n");
431 nv_encoder->crtc = NULL; 447 nv_encoder->crtc = NULL;
432 return; 448 return;
433 } 449 }
@@ -458,11 +474,9 @@ static void
458nv50_sor_destroy(struct drm_encoder *encoder) 474nv50_sor_destroy(struct drm_encoder *encoder)
459{ 475{
460 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 476 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
477 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
461 478
462 if (!encoder) 479 NV_DEBUG(drm, "\n");
463 return;
464
465 NV_DEBUG_KMS(encoder->dev, "\n");
466 480
467 drm_encoder_cleanup(encoder); 481 drm_encoder_cleanup(encoder);
468 482
@@ -474,21 +488,22 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
474}; 488};
475 489
476int 490int
477nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry) 491nv50_sor_create(struct drm_connector *connector, struct dcb_output *entry)
478{ 492{
479 struct nouveau_encoder *nv_encoder = NULL; 493 struct nouveau_encoder *nv_encoder = NULL;
480 struct drm_device *dev = connector->dev; 494 struct drm_device *dev = connector->dev;
495 struct nouveau_drm *drm = nouveau_drm(dev);
481 struct drm_encoder *encoder; 496 struct drm_encoder *encoder;
482 int type; 497 int type;
483 498
484 NV_DEBUG_KMS(dev, "\n"); 499 NV_DEBUG(drm, "\n");
485 500
486 switch (entry->type) { 501 switch (entry->type) {
487 case OUTPUT_TMDS: 502 case DCB_OUTPUT_TMDS:
488 case OUTPUT_DP: 503 case DCB_OUTPUT_DP:
489 type = DRM_MODE_ENCODER_TMDS; 504 type = DRM_MODE_ENCODER_TMDS;
490 break; 505 break;
491 case OUTPUT_LVDS: 506 case DCB_OUTPUT_LVDS:
492 type = DRM_MODE_ENCODER_LVDS; 507 type = DRM_MODE_ENCODER_LVDS;
493 break; 508 break;
494 default: 509 default:
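
Every hunk in nv50_sor.c above follows the same conversion pattern: the NV_* logging macros now take a struct nouveau_drm pointer obtained with nouveau_drm(), register accessors take a struct nouveau_device pointer from nouveau_dev(), and the DCB output-type enums gain a DCB_ prefix. A minimal sketch of the resulting shape, using only accessors and macros that appear in the hunks above (the function itself is illustrative):

    /* Sketch of the post-conversion idiom; example_sor_poke() is not in the
     * patch, the handles and macros are taken from the hunks above. */
    static void example_sor_poke(struct drm_encoder *encoder, int or)
    {
            struct nouveau_drm *drm = nouveau_drm(encoder->dev);       /* logging handle */
            struct nouveau_device *device = nouveau_dev(encoder->dev); /* register handle */

            NV_DEBUG(drm, "or %d\n", or);
            nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
                    nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)) |
                    NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
    }
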
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
deleted file mode 100644
index e2a1af7b9eef..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ /dev/null
@@ -1,237 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28
29static int types[0x80] = {
30 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
31 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
32 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
33 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
34 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
35 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
36 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
37 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
38};
39
40bool
41nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
42{
43 int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
44
45 if (likely(type < ARRAY_SIZE(types) && types[type]))
46 return true;
47 return false;
48}
49
50void
51nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
52{
53 struct drm_nouveau_private *dev_priv = dev->dev_private;
54 struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
55 struct nouveau_mm_node *this;
56 struct nouveau_mem *mem;
57
58 mem = *pmem;
59 *pmem = NULL;
60 if (unlikely(mem == NULL))
61 return;
62
63 mutex_lock(&mm->mutex);
64 while (!list_empty(&mem->regions)) {
65 this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
66
67 list_del(&this->rl_entry);
68 nouveau_mm_put(mm, this);
69 }
70
71 if (mem->tag) {
72 drm_mm_put_block(mem->tag);
73 mem->tag = NULL;
74 }
75 mutex_unlock(&mm->mutex);
76
77 kfree(mem);
78}
79
80int
81nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
82 u32 memtype, struct nouveau_mem **pmem)
83{
84 struct drm_nouveau_private *dev_priv = dev->dev_private;
85 struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
86 struct nouveau_mm_node *r;
87 struct nouveau_mem *mem;
88 int comp = (memtype & 0x300) >> 8;
89 int type = (memtype & 0x07f);
90 int ret;
91
92 if (!types[type])
93 return -EINVAL;
94 size >>= 12;
95 align >>= 12;
96 size_nc >>= 12;
97
98 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
99 if (!mem)
100 return -ENOMEM;
101
102 mutex_lock(&mm->mutex);
103 if (comp) {
104 if (align == 16) {
105 struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
106 int n = (size >> 4) * comp;
107
108 mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
109 if (mem->tag)
110 mem->tag = drm_mm_get_block(mem->tag, n, 0);
111 }
112
113 if (unlikely(!mem->tag))
114 comp = 0;
115 }
116
117 INIT_LIST_HEAD(&mem->regions);
118 mem->dev = dev_priv->dev;
119 mem->memtype = (comp << 7) | type;
120 mem->size = size;
121
122 do {
123 ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
124 if (ret) {
125 mutex_unlock(&mm->mutex);
126 nv50_vram_del(dev, &mem);
127 return ret;
128 }
129
130 list_add_tail(&r->rl_entry, &mem->regions);
131 size -= r->length;
132 } while (size);
133 mutex_unlock(&mm->mutex);
134
135 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
136 mem->offset = (u64)r->offset << 12;
137 *pmem = mem;
138 return 0;
139}
140
141static u32
142nv50_vram_rblock(struct drm_device *dev)
143{
144 struct drm_nouveau_private *dev_priv = dev->dev_private;
145 int i, parts, colbits, rowbitsa, rowbitsb, banks;
146 u64 rowsize, predicted;
147 u32 r0, r4, rt, ru, rblock_size;
148
149 r0 = nv_rd32(dev, 0x100200);
150 r4 = nv_rd32(dev, 0x100204);
151 rt = nv_rd32(dev, 0x100250);
152 ru = nv_rd32(dev, 0x001540);
153 NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
154
155 for (i = 0, parts = 0; i < 8; i++) {
156 if (ru & (0x00010000 << i))
157 parts++;
158 }
159
160 colbits = (r4 & 0x0000f000) >> 12;
161 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
162 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
163 banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
164
165 rowsize = parts * banks * (1 << colbits) * 8;
166 predicted = rowsize << rowbitsa;
167 if (r0 & 0x00000004)
168 predicted += rowsize << rowbitsb;
169
170 if (predicted != dev_priv->vram_size) {
171 NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
172 (u32)(dev_priv->vram_size >> 20));
173 NV_WARN(dev, "we calculated %dMiB VRAM\n",
174 (u32)(predicted >> 20));
175 }
176
177 rblock_size = rowsize;
178 if (rt & 1)
179 rblock_size *= 3;
180
181 NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
182 return rblock_size;
183}
184
185int
186nv50_vram_init(struct drm_device *dev)
187{
188 struct drm_nouveau_private *dev_priv = dev->dev_private;
189 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
190 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
191 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
192 u32 pfb714 = nv_rd32(dev, 0x100714);
193 u32 rblock, length;
194
195 switch (pfb714 & 0x00000007) {
196 case 0: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
197 case 1:
198 if (nouveau_mem_vbios_type(dev) == NV_MEM_TYPE_DDR3)
199 dev_priv->vram_type = NV_MEM_TYPE_DDR3;
200 else
201 dev_priv->vram_type = NV_MEM_TYPE_DDR2;
202 break;
203 case 2: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
204 case 3: dev_priv->vram_type = NV_MEM_TYPE_GDDR4; break;
205 case 4: dev_priv->vram_type = NV_MEM_TYPE_GDDR5; break;
206 default:
207 break;
208 }
209
210 dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x100200) & 0x4);
211 dev_priv->vram_size = nv_rd32(dev, 0x10020c);
212 dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
213 dev_priv->vram_size &= 0xffffffff00ULL;
214
215 /* IGPs: no funky reordering happens here, they have no dedicated VRAM */
216 if (dev_priv->chipset == 0xaa ||
217 dev_priv->chipset == 0xac ||
218 dev_priv->chipset == 0xaf) {
219 dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
220 rblock = 4096 >> 12;
221 } else {
222 rblock = nv50_vram_rblock(dev) >> 12;
223 }
224
225 length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
226
227 return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock);
228}
229
230void
231nv50_vram_fini(struct drm_device *dev)
232{
233 struct drm_nouveau_private *dev_priv = dev->dev_private;
234 struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
235
236 nouveau_mm_fini(&vram->mm);
237}
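
The deleted nv50_vram.c bundled three jobs: the memtype validity table, nouveau_mm-backed VRAM allocation with optional compression tags, and the memory-controller probe in nv50_vram_rblock(). The probe's sanity check compares a predicted size against what the controller reports; as a pure function over the fields decoded above (a sketch, not the replacement code):

    /* Sketch: the VRAM-size prediction nv50_vram_rblock() checked against the
     * reported size.  Field layout copied from the deleted function above;
     * r0 = 0x100200, r4 = 0x100204, parts = set bits in 0x001540[23:16]. */
    static u64 nv50_predicted_vram(u32 r0, u32 r4, int parts)
    {
            int colbits   = (r4 & 0x0000f000) >> 12;
            int rowbitsa  = ((r4 & 0x000f0000) >> 16) + 8;
            int rowbitsb  = ((r4 & 0x00f00000) >> 20) + 8;
            int banks     = 1 << (((r4 & 0x03000000) >> 24) + 2);
            u64 rowsize   = (u64)parts * banks * (1 << colbits) * 8;
            u64 predicted = rowsize << rowbitsa;

            if (r0 & 0x00000004) /* second row size contributes as well */
                    predicted += rowsize << rowbitsb;
            return predicted;
    }
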
diff --git a/drivers/gpu/drm/nouveau/nv84_bsp.c b/drivers/gpu/drm/nouveau/nv84_bsp.c
deleted file mode 100644
index a4f4d4a0a755..000000000000
--- a/drivers/gpu/drm/nouveau/nv84_bsp.c
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drv.h"
27#include "nouveau_util.h"
28#include "nouveau_vm.h"
29#include "nouveau_ramht.h"
30
31/*XXX: This stub is currently used on NV98+ also; as soon as this becomes
32 * more than just an enable/disable stub this needs to be split out to
33 * nv98_bsp.c...
34 */
35
36struct nv84_bsp_engine {
37 struct nouveau_exec_engine base;
38};
39
40static int
41nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
42{
43 if (!(nv_rd32(dev, 0x000200) & 0x00008000))
44 return 0;
45
46 nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
47 return 0;
48}
49
50static int
51nv84_bsp_init(struct drm_device *dev, int engine)
52{
53 nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
54 nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
55 return 0;
56}
57
58static void
59nv84_bsp_destroy(struct drm_device *dev, int engine)
60{
61 struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
62
63 NVOBJ_ENGINE_DEL(dev, BSP);
64
65 kfree(pbsp);
66}
67
68int
69nv84_bsp_create(struct drm_device *dev)
70{
71 struct nv84_bsp_engine *pbsp;
72
73 pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
74 if (!pbsp)
75 return -ENOMEM;
76
77 pbsp->base.destroy = nv84_bsp_destroy;
78 pbsp->base.init = nv84_bsp_init;
79 pbsp->base.fini = nv84_bsp_fini;
80
81 NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
82 return 0;
83}
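
nv84_bsp.c never grew beyond an enable/disable stub: init pulses the engine's bit in the master enable register 0x000200 and fini clears it. The whole file reduces to the following sketch (engine_reset() is illustrative; nv_mask() is the old-API accessor used above):

    /* Sketch: reset an engine by pulsing its bit in 0x000200.  BSP owns
     * bit 0x00008000; VP further down owns 0x00020000. */
    static void engine_reset(struct drm_device *dev, u32 bit)
    {
            nv_mask(dev, 0x000200, bit, 0x00000000); /* put engine into reset */
            nv_mask(dev, 0x000200, bit, bit);        /* and bring it back out */
    }
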
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
deleted file mode 100644
index dc2bc5cc536d..000000000000
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ /dev/null
@@ -1,205 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drv.h"
27#include "nouveau_util.h"
28#include "nouveau_vm.h"
29#include "nouveau_ramht.h"
30
31struct nv84_crypt_engine {
32 struct nouveau_exec_engine base;
33};
34
35static int
36nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
37{
38 struct drm_device *dev = chan->dev;
39 struct drm_nouveau_private *dev_priv = dev->dev_private;
40 struct nouveau_gpuobj *ramin = chan->ramin;
41 struct nouveau_gpuobj *ctx;
42 int ret;
43
44 NV_DEBUG(dev, "ch%d\n", chan->id);
45
46 ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
47 NVOBJ_FLAG_ZERO_FREE, &ctx);
48 if (ret)
49 return ret;
50
51 nv_wo32(ramin, 0xa0, 0x00190000);
52 nv_wo32(ramin, 0xa4, ctx->vinst + ctx->size - 1);
53 nv_wo32(ramin, 0xa8, ctx->vinst);
54 nv_wo32(ramin, 0xac, 0);
55 nv_wo32(ramin, 0xb0, 0);
56 nv_wo32(ramin, 0xb4, 0);
57 dev_priv->engine.instmem.flush(dev);
58
59 atomic_inc(&chan->vm->engref[engine]);
60 chan->engctx[engine] = ctx;
61 return 0;
62}
63
64static void
65nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
66{
67 struct nouveau_gpuobj *ctx = chan->engctx[engine];
68 struct drm_device *dev = chan->dev;
69 u32 inst;
70
71 inst = (chan->ramin->vinst >> 12);
72 inst |= 0x80000000;
73
74 /* mark the context as invalid if it is still on the hardware;
75 * not doing this causes issues the next time PCRYPT is used,
76 * unsurprisingly :)
77 */
78 nv_wr32(dev, 0x10200c, 0x00000000);
79 if (nv_rd32(dev, 0x102188) == inst)
80 nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
81 if (nv_rd32(dev, 0x10218c) == inst)
82 nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
83 nv_wr32(dev, 0x10200c, 0x00000010);
84
85 nouveau_gpuobj_ref(NULL, &ctx);
86
87 atomic_dec(&chan->vm->engref[engine]);
88 chan->engctx[engine] = NULL;
89}
90
91static int
92nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
93 u32 handle, u16 class)
94{
95 struct drm_device *dev = chan->dev;
96 struct drm_nouveau_private *dev_priv = dev->dev_private;
97 struct nouveau_gpuobj *obj = NULL;
98 int ret;
99
100 ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
101 if (ret)
102 return ret;
103 obj->engine = 5;
104 obj->class = class;
105
106 nv_wo32(obj, 0x00, class);
107 dev_priv->engine.instmem.flush(dev);
108
109 ret = nouveau_ramht_insert(chan, handle, obj);
110 nouveau_gpuobj_ref(NULL, &obj);
111 return ret;
112}
113
114static void
115nv84_crypt_tlb_flush(struct drm_device *dev, int engine)
116{
117 nv50_vm_flush_engine(dev, 0x0a);
118}
119
120static struct nouveau_bitfield nv84_crypt_intr[] = {
121 { 0x00000001, "INVALID_STATE" },
122 { 0x00000002, "ILLEGAL_MTHD" },
123 { 0x00000004, "ILLEGAL_CLASS" },
124 { 0x00000080, "QUERY" },
125 { 0x00000100, "FAULT" },
126 {}
127};
128
129static void
130nv84_crypt_isr(struct drm_device *dev)
131{
132 u32 stat = nv_rd32(dev, 0x102130);
133 u32 mthd = nv_rd32(dev, 0x102190);
134 u32 data = nv_rd32(dev, 0x102194);
135 u64 inst = (u64)(nv_rd32(dev, 0x102188) & 0x7fffffff) << 12;
136 int show = nouveau_ratelimit();
137 int chid = nv50_graph_isr_chid(dev, inst);
138
139 if (show) {
140 NV_INFO(dev, "PCRYPT:");
141 nouveau_bitfield_print(nv84_crypt_intr, stat);
142 printk(KERN_CONT " ch %d (0x%010llx) mthd 0x%04x data 0x%08x\n",
143 chid, inst, mthd, data);
144 }
145
146 nv_wr32(dev, 0x102130, stat);
147 nv_wr32(dev, 0x10200c, 0x10);
148
149 nv50_fb_vm_trap(dev, show);
150}
151
152static int
153nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend)
154{
155 nv_wr32(dev, 0x102140, 0x00000000);
156 return 0;
157}
158
159static int
160nv84_crypt_init(struct drm_device *dev, int engine)
161{
162 nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
163 nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
164
165 nv_wr32(dev, 0x102130, 0xffffffff);
166 nv_wr32(dev, 0x102140, 0xffffffbf);
167
168 nv_wr32(dev, 0x10200c, 0x00000010);
169 return 0;
170}
171
172static void
173nv84_crypt_destroy(struct drm_device *dev, int engine)
174{
175 struct nv84_crypt_engine *pcrypt = nv_engine(dev, engine);
176
177 NVOBJ_ENGINE_DEL(dev, CRYPT);
178
179 nouveau_irq_unregister(dev, 14);
180 kfree(pcrypt);
181}
182
183int
184nv84_crypt_create(struct drm_device *dev)
185{
186 struct nv84_crypt_engine *pcrypt;
187
188 pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
189 if (!pcrypt)
190 return -ENOMEM;
191
192 pcrypt->base.destroy = nv84_crypt_destroy;
193 pcrypt->base.init = nv84_crypt_init;
194 pcrypt->base.fini = nv84_crypt_fini;
195 pcrypt->base.context_new = nv84_crypt_context_new;
196 pcrypt->base.context_del = nv84_crypt_context_del;
197 pcrypt->base.object_new = nv84_crypt_object_new;
198 pcrypt->base.tlb_flush = nv84_crypt_tlb_flush;
199
200 nouveau_irq_register(dev, 14, nv84_crypt_isr);
201
202 NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
203 NVOBJ_CLASS (dev, 0x74c1, CRYPT);
204 return 0;
205}
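
The substantive part of the deleted nv84_crypt.c is the context plumbing: context_new() allocates a 256-byte context object, then publishes a six-word descriptor for it in the channel's instance memory at offset 0xa0. Condensed into a helper (a sketch; offsets and values are copied verbatim from the code above):

    /* Sketch: publish a PCRYPT context through the channel's RAMIN. */
    static void crypt_write_ctx(struct nouveau_gpuobj *ramin,
                                struct nouveau_gpuobj *ctx)
    {
            nv_wo32(ramin, 0xa0, 0x00190000);                 /* flags */
            nv_wo32(ramin, 0xa4, ctx->vinst + ctx->size - 1); /* limit */
            nv_wo32(ramin, 0xa8, ctx->vinst);                 /* base */
            nv_wo32(ramin, 0xac, 0);
            nv_wo32(ramin, 0xb0, 0);
            nv_wo32(ramin, 0xb4, 0);
    }
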
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 60dd73d532e7..c686650584b6 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -22,13 +22,17 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <drm/drmP.h> 25#include <core/object.h>
26#include "nouveau_drv.h" 26#include <core/class.h>
27
28#include <engine/fifo.h>
29
30#include "nouveau_drm.h"
27#include "nouveau_dma.h" 31#include "nouveau_dma.h"
28#include "nouveau_fifo.h"
29#include "nouveau_ramht.h"
30#include "nouveau_fence.h" 32#include "nouveau_fence.h"
31 33
34#include "nv50_display.h"
35
32struct nv84_fence_chan { 36struct nv84_fence_chan {
33 struct nouveau_fence_chan base; 37 struct nouveau_fence_chan base;
34}; 38};
@@ -42,13 +46,14 @@ static int
42nv84_fence_emit(struct nouveau_fence *fence) 46nv84_fence_emit(struct nouveau_fence *fence)
43{ 47{
44 struct nouveau_channel *chan = fence->channel; 48 struct nouveau_channel *chan = fence->channel;
49 struct nouveau_fifo_chan *fifo = (void *)chan->object;
45 int ret = RING_SPACE(chan, 7); 50 int ret = RING_SPACE(chan, 7);
46 if (ret == 0) { 51 if (ret == 0) {
47 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); 52 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
48 OUT_RING (chan, NvSema); 53 OUT_RING (chan, NvSema);
49 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); 54 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
50 OUT_RING (chan, upper_32_bits(chan->id * 16)); 55 OUT_RING (chan, upper_32_bits(fifo->chid * 16));
51 OUT_RING (chan, lower_32_bits(chan->id * 16)); 56 OUT_RING (chan, lower_32_bits(fifo->chid * 16));
52 OUT_RING (chan, fence->sequence); 57 OUT_RING (chan, fence->sequence);
53 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG); 58 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
54 FIRE_RING (chan); 59 FIRE_RING (chan);
@@ -61,13 +66,14 @@ static int
61nv84_fence_sync(struct nouveau_fence *fence, 66nv84_fence_sync(struct nouveau_fence *fence,
62 struct nouveau_channel *prev, struct nouveau_channel *chan) 67 struct nouveau_channel *prev, struct nouveau_channel *chan)
63{ 68{
69 struct nouveau_fifo_chan *fifo = (void *)prev->object;
64 int ret = RING_SPACE(chan, 7); 70 int ret = RING_SPACE(chan, 7);
65 if (ret == 0) { 71 if (ret == 0) {
66 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); 72 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
67 OUT_RING (chan, NvSema); 73 OUT_RING (chan, NvSema);
68 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); 74 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
69 OUT_RING (chan, upper_32_bits(prev->id * 16)); 75 OUT_RING (chan, upper_32_bits(fifo->chid * 16));
70 OUT_RING (chan, lower_32_bits(prev->id * 16)); 76 OUT_RING (chan, lower_32_bits(fifo->chid * 16));
71 OUT_RING (chan, fence->sequence); 77 OUT_RING (chan, fence->sequence);
72 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL); 78 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
73 FIRE_RING (chan); 79 FIRE_RING (chan);
@@ -78,100 +84,99 @@ nv84_fence_sync(struct nouveau_fence *fence,
78static u32 84static u32
79nv84_fence_read(struct nouveau_channel *chan) 85nv84_fence_read(struct nouveau_channel *chan)
80{ 86{
81 struct nv84_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE); 87 struct nouveau_fifo_chan *fifo = (void *)chan->object;
82 return nv_ro32(priv->mem, chan->id * 16); 88 struct nv84_fence_priv *priv = chan->drm->fence;
89 return nv_ro32(priv->mem, fifo->chid * 16);
83} 90}
84 91
85static void 92static void
86nv84_fence_context_del(struct nouveau_channel *chan, int engine) 93nv84_fence_context_del(struct nouveau_channel *chan)
87{ 94{
88 struct nv84_fence_chan *fctx = chan->engctx[engine]; 95 struct nv84_fence_chan *fctx = chan->fence;
89 nouveau_fence_context_del(&fctx->base); 96 nouveau_fence_context_del(&fctx->base);
90 chan->engctx[engine] = NULL; 97 chan->fence = NULL;
91 kfree(fctx); 98 kfree(fctx);
92} 99}
93 100
94static int 101static int
95nv84_fence_context_new(struct nouveau_channel *chan, int engine) 102nv84_fence_context_new(struct nouveau_channel *chan)
96{ 103{
97 struct nv84_fence_priv *priv = nv_engine(chan->dev, engine); 104 struct drm_device *dev = chan->drm->dev;
105 struct nouveau_fifo_chan *fifo = (void *)chan->object;
106 struct nv84_fence_priv *priv = chan->drm->fence;
98 struct nv84_fence_chan *fctx; 107 struct nv84_fence_chan *fctx;
99 struct nouveau_gpuobj *obj; 108 struct nouveau_object *object;
100 int ret; 109 int ret, i;
101 110
102 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); 111 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
103 if (!fctx) 112 if (!fctx)
104 return -ENOMEM; 113 return -ENOMEM;
105 114
106 nouveau_fence_context_new(&fctx->base); 115 nouveau_fence_context_new(&fctx->base);
107 116
108 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY, 117 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
109 priv->mem->vinst, priv->mem->size, 118 NvSema, 0x0002,
110 NV_MEM_ACCESS_RW, 119 &(struct nv_dma_class) {
111 NV_MEM_TARGET_VRAM, &obj); 120 .flags = NV_DMA_TARGET_VRAM |
112 if (ret == 0) { 121 NV_DMA_ACCESS_RDWR,
113 ret = nouveau_ramht_insert(chan, NvSema, obj); 122 .start = priv->mem->addr,
114 nouveau_gpuobj_ref(NULL, &obj); 123 .limit = priv->mem->addr +
115 nv_wo32(priv->mem, chan->id * 16, 0x00000000); 124 priv->mem->size - 1,
125 }, sizeof(struct nv_dma_class),
126 &object);
127
128 /* dma objects for display sync channel semaphore blocks */
129 for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
130 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
131
132 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
133 NvEvoSema0 + i, 0x003d,
134 &(struct nv_dma_class) {
135 .flags = NV_DMA_TARGET_VRAM |
136 NV_DMA_ACCESS_RDWR,
137 .start = bo->bo.offset,
138 .limit = bo->bo.offset + 0xfff,
139 }, sizeof(struct nv_dma_class),
140 &object);
116 } 141 }
117 142
118 if (ret) 143 if (ret)
119 nv84_fence_context_del(chan, engine); 144 nv84_fence_context_del(chan);
145 nv_wo32(priv->mem, fifo->chid * 16, 0x00000000);
120 return ret; 146 return ret;
121} 147}
122 148
123static int
124nv84_fence_fini(struct drm_device *dev, int engine, bool suspend)
125{
126 return 0;
127}
128
129static int
130nv84_fence_init(struct drm_device *dev, int engine)
131{
132 return 0;
133}
134
135static void 149static void
136nv84_fence_destroy(struct drm_device *dev, int engine) 150nv84_fence_destroy(struct nouveau_drm *drm)
137{ 151{
138 struct drm_nouveau_private *dev_priv = dev->dev_private; 152 struct nv84_fence_priv *priv = drm->fence;
139 struct nv84_fence_priv *priv = nv_engine(dev, engine);
140
141 nouveau_gpuobj_ref(NULL, &priv->mem); 153 nouveau_gpuobj_ref(NULL, &priv->mem);
142 dev_priv->eng[engine] = NULL; 154 drm->fence = NULL;
143 kfree(priv); 155 kfree(priv);
144} 156}
145 157
146int 158int
147nv84_fence_create(struct drm_device *dev) 159nv84_fence_create(struct nouveau_drm *drm)
148{ 160{
149 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO); 161 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
150 struct drm_nouveau_private *dev_priv = dev->dev_private;
151 struct nv84_fence_priv *priv; 162 struct nv84_fence_priv *priv;
163 u32 chan = pfifo->max + 1;
152 int ret; 164 int ret;
153 165
154 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 166 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
155 if (!priv) 167 if (!priv)
156 return -ENOMEM; 168 return -ENOMEM;
157 169
158 priv->base.engine.destroy = nv84_fence_destroy; 170 priv->base.dtor = nv84_fence_destroy;
159 priv->base.engine.init = nv84_fence_init; 171 priv->base.context_new = nv84_fence_context_new;
160 priv->base.engine.fini = nv84_fence_fini; 172 priv->base.context_del = nv84_fence_context_del;
161 priv->base.engine.context_new = nv84_fence_context_new;
162 priv->base.engine.context_del = nv84_fence_context_del;
163 priv->base.emit = nv84_fence_emit; 173 priv->base.emit = nv84_fence_emit;
164 priv->base.sync = nv84_fence_sync; 174 priv->base.sync = nv84_fence_sync;
165 priv->base.read = nv84_fence_read; 175 priv->base.read = nv84_fence_read;
166 dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
167
168 ret = nouveau_gpuobj_new(dev, NULL, 16 * pfifo->channels,
169 0x1000, 0, &priv->mem);
170 if (ret)
171 goto out;
172 176
173out: 177 ret = nouveau_gpuobj_new(drm->device, NULL, chan * 16, 0x1000, 0,
178 &priv->mem);
174 if (ret) 179 if (ret)
175 nv84_fence_destroy(dev, NVOBJ_ENGINE_FENCE); 180 nv84_fence_destroy(drm);
176 return ret; 181 return ret;
177} 182}
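
Two changes carry the nv84_fence.c conversion: the per-channel semaphore slot is now indexed by fifo->chid, looked up through the channel's fifo object, instead of chan->id, and the NvSema DMA object is created through the new object interface rather than nouveau_gpuobj_dma_new() plus a RAMHT insert. The new-style creation, condensed from the hunk above (a sketch; example_sema_dma(), addr and size are illustrative stand-ins for priv->mem->addr and priv->mem->size):

    /* Sketch: expose a VRAM range to a channel as DMA object NvSema. */
    static int example_sema_dma(struct nouveau_channel *chan, u64 addr, u64 size)
    {
            struct nouveau_object *object;

            return nouveau_object_new(nv_object(chan->cli), chan->handle,
                                      NvSema, 0x0002,
                                      &(struct nv_dma_class) {
                                              .flags = NV_DMA_TARGET_VRAM |
                                                       NV_DMA_ACCESS_RDWR,
                                              .start = addr,
                                              .limit = addr + size - 1,
                                      }, sizeof(struct nv_dma_class),
                                      &object);
    }
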
diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c
deleted file mode 100644
index 9844a65491c3..000000000000
--- a/drivers/gpu/drm/nouveau/nv84_fifo.c
+++ /dev/null
@@ -1,249 +0,0 @@
1/*
2 * Copyright (C) 2012 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include "nouveau_drv.h"
29#include "nouveau_fifo.h"
30#include "nouveau_ramht.h"
31#include "nouveau_vm.h"
32
33struct nv84_fifo_priv {
34 struct nouveau_fifo_priv base;
35 struct nouveau_gpuobj *playlist[2];
36 int cur_playlist;
37};
38
39struct nv84_fifo_chan {
40 struct nouveau_fifo_chan base;
41 struct nouveau_gpuobj *ramfc;
42 struct nouveau_gpuobj *cache;
43};
44
45static int
46nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
47{
48 struct nv84_fifo_priv *priv = nv_engine(chan->dev, engine);
49 struct nv84_fifo_chan *fctx;
50 struct drm_device *dev = chan->dev;
51 struct drm_nouveau_private *dev_priv = dev->dev_private;
52 u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
53 u64 instance;
54 unsigned long flags;
55 int ret;
56
57 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
58 if (!fctx)
59 return -ENOMEM;
60 atomic_inc(&chan->vm->engref[engine]);
61
62 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
63 NV50_USER(chan->id), PAGE_SIZE);
64 if (!chan->user) {
65 ret = -ENOMEM;
66 goto error;
67 }
68
69 ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_ZERO_ALLOC |
70 NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
71 if (ret)
72 goto error;
73
74 instance = fctx->ramfc->vinst >> 8;
75
76 ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
77 if (ret)
78 goto error;
79
80 nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
81 nv_wo32(fctx->ramfc, 0x40, 0x00000000);
82 nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
83 nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->cinst >> 4);
84 nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
85 nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
86 drm_order(chan->dma.ib_max + 1) << 16);
87 nv_wo32(fctx->ramfc, 0x60, 0x7fffffff);
88 nv_wo32(fctx->ramfc, 0x78, 0x00000000);
89 nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
90 nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
91 (4 << 24) /* SEARCH_FULL */ |
92 (chan->ramht->gpuobj->cinst >> 4));
93 nv_wo32(fctx->ramfc, 0x88, fctx->cache->vinst >> 10);
94 nv_wo32(fctx->ramfc, 0x98, chan->ramin->vinst >> 12);
95
96 nv_wo32(chan->ramin, 0x00, chan->id);
97 nv_wo32(chan->ramin, 0x04, fctx->ramfc->vinst >> 8);
98
99 dev_priv->engine.instmem.flush(dev);
100
101 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
102 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
103 nv50_fifo_playlist_update(dev);
104 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
105
106error:
107 if (ret)
108 priv->base.base.context_del(chan, engine);
109 return ret;
110}
111
112static void
113nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
114{
115 struct nv84_fifo_chan *fctx = chan->engctx[engine];
116 struct drm_device *dev = chan->dev;
117 struct drm_nouveau_private *dev_priv = dev->dev_private;
118 unsigned long flags;
119 u32 save;
120
121 /* remove channel from playlist, will context switch if active */
122 spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
123 nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
124 nv50_fifo_playlist_update(dev);
125
126 save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
127
128 /* tell any engines on this channel to unload their contexts */
129 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
130 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
131 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
132
133 nv_wr32(dev, 0x002520, save);
134
135 nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
136 spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
137
138 /* clean up */
139 if (chan->user) {
140 iounmap(chan->user);
141 chan->user = NULL;
142 }
143
144 nouveau_gpuobj_ref(NULL, &fctx->ramfc);
145 nouveau_gpuobj_ref(NULL, &fctx->cache);
146
147 atomic_dec(&chan->vm->engref[engine]);
148 chan->engctx[engine] = NULL;
149 kfree(fctx);
150}
151
152static int
153nv84_fifo_init(struct drm_device *dev, int engine)
154{
155 struct drm_nouveau_private *dev_priv = dev->dev_private;
156 struct nv84_fifo_chan *fctx;
157 u32 instance;
158 int i;
159
160 nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
161 nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
162 nv_wr32(dev, 0x00250c, 0x6f3cfc34);
163 nv_wr32(dev, 0x002044, 0x01003fff);
164
165 nv_wr32(dev, 0x002100, 0xffffffff);
166 nv_wr32(dev, 0x002140, 0xffffffff);
167
168 for (i = 0; i < 128; i++) {
169 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
170 if (chan && (fctx = chan->engctx[engine]))
171 instance = 0x80000000 | fctx->ramfc->vinst >> 8;
172 else
173 instance = 0x00000000;
174 nv_wr32(dev, 0x002600 + (i * 4), instance);
175 }
176
177 nv50_fifo_playlist_update(dev);
178
179 nv_wr32(dev, 0x003200, 1);
180 nv_wr32(dev, 0x003250, 1);
181 nv_wr32(dev, 0x002500, 1);
182 return 0;
183}
184
185static int
186nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
187{
188 struct drm_nouveau_private *dev_priv = dev->dev_private;
189 struct nv84_fifo_priv *priv = nv_engine(dev, engine);
190 int i;
191 u32 save;
192
193 /* set playlist length to zero, fifo will unload context */
194 nv_wr32(dev, 0x0032ec, 0);
195
196 save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
197
198 /* tell all connected engines to unload their contexts */
199 for (i = 0; i < priv->base.channels; i++) {
200 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
201 if (chan)
202 nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
203 if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
204 NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
205 return -EBUSY;
206 }
207 }
208
209 nv_wr32(dev, 0x002520, save);
210 nv_wr32(dev, 0x002140, 0);
211 return 0;
212}
213
214int
215nv84_fifo_create(struct drm_device *dev)
216{
217 struct drm_nouveau_private *dev_priv = dev->dev_private;
218 struct nv84_fifo_priv *priv;
219 int ret;
220
221 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
222 if (!priv)
223 return -ENOMEM;
224
225 priv->base.base.destroy = nv50_fifo_destroy;
226 priv->base.base.init = nv84_fifo_init;
227 priv->base.base.fini = nv84_fifo_fini;
228 priv->base.base.context_new = nv84_fifo_context_new;
229 priv->base.base.context_del = nv84_fifo_context_del;
230 priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
231 priv->base.channels = 127;
232 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
233
234 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
235 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
236 if (ret)
237 goto error;
238
239 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
240 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
241 if (ret)
242 goto error;
243
244 nouveau_irq_register(dev, 8, nv04_fifo_isr);
245error:
246 if (ret)
247 priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
248 return ret;
249}
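
Most of the deleted nv84_fifo.c is RAMFC bookkeeping, but the sequence its replacement must reproduce exactly is the context-unload handshake: write the channel's instance address to 0x0032fc, then wait for the engines to acknowledge by changing it away from 0xffffffff. From the teardown path above (a sketch; example_ctx_unload() is illustrative):

    /* Sketch: ask all engines to unload a channel's context and wait,
     * as done in nv84_fifo_context_del() above. */
    static void example_ctx_unload(struct drm_device *dev,
                                   struct nouveau_channel *chan)
    {
            u32 save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);

            nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
            if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
                    NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
            nv_wr32(dev, 0x002520, save);
    }
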
diff --git a/drivers/gpu/drm/nouveau/nv84_vp.c b/drivers/gpu/drm/nouveau/nv84_vp.c
deleted file mode 100644
index 0dec4958eb5f..000000000000
--- a/drivers/gpu/drm/nouveau/nv84_vp.c
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drv.h"
27#include "nouveau_util.h"
28#include "nouveau_vm.h"
29#include "nouveau_ramht.h"
30
31/*XXX: This stub is currently used on NV98+ also; as soon as this becomes
32 * more than just an enable/disable stub this needs to be split out to
33 * nv98_vp.c...
34 */
35
36struct nv84_vp_engine {
37 struct nouveau_exec_engine base;
38};
39
40static int
41nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
42{
43 if (!(nv_rd32(dev, 0x000200) & 0x00020000))
44 return 0;
45
46 nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
47 return 0;
48}
49
50static int
51nv84_vp_init(struct drm_device *dev, int engine)
52{
53 nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
54 nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
55 return 0;
56}
57
58static void
59nv84_vp_destroy(struct drm_device *dev, int engine)
60{
61 struct nv84_vp_engine *pvp = nv_engine(dev, engine);
62
63 NVOBJ_ENGINE_DEL(dev, VP);
64
65 kfree(pvp);
66}
67
68int
69nv84_vp_create(struct drm_device *dev)
70{
71 struct nv84_vp_engine *pvp;
72
73 pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
74 if (!pvp)
75 return -ENOMEM;
76
77 pvp->base.destroy = nv84_vp_destroy;
78 pvp->base.init = nv84_vp_init;
79 pvp->base.fini = nv84_vp_fini;
80
81 NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
82 return 0;
83}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
deleted file mode 100644
index 6f4c15345b9b..000000000000
--- a/drivers/gpu/drm/nouveau/nv98_crypt.c
+++ /dev/null
@@ -1,216 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26
27#include "nouveau_drv.h"
28#include "nouveau_util.h"
29#include "nouveau_vm.h"
30#include "nouveau_ramht.h"
31
32#include "nv98_crypt.fuc.h"
33
34struct nv98_crypt_priv {
35 struct nouveau_exec_engine base;
36};
37
38struct nv98_crypt_chan {
39 struct nouveau_gpuobj *mem;
40};
41
42static int
43nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
44{
45 struct drm_device *dev = chan->dev;
46 struct drm_nouveau_private *dev_priv = dev->dev_private;
47 struct nv98_crypt_priv *priv = nv_engine(dev, engine);
48 struct nv98_crypt_chan *cctx;
49 int ret;
50
51 cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
52 if (!cctx)
53 return -ENOMEM;
54
55 atomic_inc(&chan->vm->engref[engine]);
56
57 ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
58 NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
59 if (ret)
60 goto error;
61
62 nv_wo32(chan->ramin, 0xa0, 0x00190000);
63 nv_wo32(chan->ramin, 0xa4, cctx->mem->vinst + cctx->mem->size - 1);
64 nv_wo32(chan->ramin, 0xa8, cctx->mem->vinst);
65 nv_wo32(chan->ramin, 0xac, 0x00000000);
66 nv_wo32(chan->ramin, 0xb0, 0x00000000);
67 nv_wo32(chan->ramin, 0xb4, 0x00000000);
68 dev_priv->engine.instmem.flush(dev);
69
70error:
71 if (ret)
72 priv->base.context_del(chan, engine);
73 return ret;
74}
75
76static void
77nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
78{
79 struct nv98_crypt_chan *cctx = chan->engctx[engine];
80 int i;
81
82 for (i = 0xa0; i < 0xb4; i += 4)
83 nv_wo32(chan->ramin, i, 0x00000000);
84
85 nouveau_gpuobj_ref(NULL, &cctx->mem);
86
87 atomic_dec(&chan->vm->engref[engine]);
88 chan->engctx[engine] = NULL;
89 kfree(cctx);
90}
91
92static int
93nv98_crypt_object_new(struct nouveau_channel *chan, int engine,
94 u32 handle, u16 class)
95{
96 struct nv98_crypt_chan *cctx = chan->engctx[engine];
97
98 /* the fuc engine doesn't need an object, but our ramht code does */
99 cctx->mem->engine = 5;
100 cctx->mem->class = class;
101 return nouveau_ramht_insert(chan, handle, cctx->mem);
102}
103
104static void
105nv98_crypt_tlb_flush(struct drm_device *dev, int engine)
106{
107 nv50_vm_flush_engine(dev, 0x0a);
108}
109
110static int
111nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
112{
113 nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
114 return 0;
115}
116
117static int
118nv98_crypt_init(struct drm_device *dev, int engine)
119{
120 int i;
121
122 /* reset! */
123 nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
124 nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
125
126 /* wait for exit interrupt to signal */
127 nv_wait(dev, 0x087008, 0x00000010, 0x00000010);
128 nv_wr32(dev, 0x087004, 0x00000010);
129
130 /* upload microcode code and data segments */
131 nv_wr32(dev, 0x087ff8, 0x00100000);
132 for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
133 nv_wr32(dev, 0x087ff4, nv98_pcrypt_code[i]);
134
135 nv_wr32(dev, 0x087ff8, 0x00000000);
136 for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
137 nv_wr32(dev, 0x087ff4, nv98_pcrypt_data[i]);
138
139 /* start it running */
140 nv_wr32(dev, 0x08710c, 0x00000000);
141 nv_wr32(dev, 0x087104, 0x00000000); /* ENTRY */
142 nv_wr32(dev, 0x087100, 0x00000002); /* TRIGGER */
143 return 0;
144}
145
146static struct nouveau_enum nv98_crypt_isr_error_name[] = {
147 { 0x0000, "ILLEGAL_MTHD" },
148 { 0x0001, "INVALID_BITFIELD" },
149 { 0x0002, "INVALID_ENUM" },
150 { 0x0003, "QUERY" },
151 {}
152};
153
154static void
155nv98_crypt_isr(struct drm_device *dev)
156{
157 u32 disp = nv_rd32(dev, 0x08701c);
158 u32 stat = nv_rd32(dev, 0x087008) & disp & ~(disp >> 16);
159 u32 inst = nv_rd32(dev, 0x087050) & 0x3fffffff;
160 u32 ssta = nv_rd32(dev, 0x087040) & 0x0000ffff;
161 u32 addr = nv_rd32(dev, 0x087040) >> 16;
162 u32 mthd = (addr & 0x07ff) << 2;
163 u32 subc = (addr & 0x3800) >> 11;
164 u32 data = nv_rd32(dev, 0x087044);
165 int chid = nv50_graph_isr_chid(dev, inst);
166
167 if (stat & 0x00000040) {
168 NV_INFO(dev, "PCRYPT: DISPATCH_ERROR [");
169 nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
170 printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
171 chid, inst, subc, mthd, data);
172 nv_wr32(dev, 0x087004, 0x00000040);
173 stat &= ~0x00000040;
174 }
175
176 if (stat) {
177 NV_INFO(dev, "PCRYPT: unhandled intr 0x%08x\n", stat);
178 nv_wr32(dev, 0x087004, stat);
179 }
180
181 nv50_fb_vm_trap(dev, 1);
182}
183
184static void
185nv98_crypt_destroy(struct drm_device *dev, int engine)
186{
187 struct nv98_crypt_priv *priv = nv_engine(dev, engine);
188
189 nouveau_irq_unregister(dev, 14);
190 NVOBJ_ENGINE_DEL(dev, CRYPT);
191 kfree(priv);
192}
193
194int
195nv98_crypt_create(struct drm_device *dev)
196{
197 struct nv98_crypt_priv *priv;
198
199 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
200 if (!priv)
201 return -ENOMEM;
202
203 priv->base.destroy = nv98_crypt_destroy;
204 priv->base.init = nv98_crypt_init;
205 priv->base.fini = nv98_crypt_fini;
206 priv->base.context_new = nv98_crypt_context_new;
207 priv->base.context_del = nv98_crypt_context_del;
208 priv->base.object_new = nv98_crypt_object_new;
209 priv->base.tlb_flush = nv98_crypt_tlb_flush;
210
211 nouveau_irq_register(dev, 14, nv98_crypt_isr);
212
213 NVOBJ_ENGINE_ADD(dev, CRYPT, &priv->base);
214 NVOBJ_CLASS(dev, 0x88b4, CRYPT);
215 return 0;
216}
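
Unlike nv84, the nv98 PCRYPT engine is a fuc (falcon) microcontroller, and nv98_crypt_init() above is the canonical bring-up: reset the engine, wait for the exit interrupt, stream the code and data segments through the 0x087ff4 window, then set the entry point and trigger execution. Condensed (a sketch; the ucode arrays come from nv98_crypt.fuc.h):

    /* Sketch: falcon microcode upload as done by nv98_crypt_init(). */
    static void example_fuc_upload(struct drm_device *dev)
    {
            int i;

            nv_wr32(dev, 0x087ff8, 0x00100000);   /* select code segment */
            for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
                    nv_wr32(dev, 0x087ff4, nv98_pcrypt_code[i]);

            nv_wr32(dev, 0x087ff8, 0x00000000);   /* select data segment */
            for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
                    nv_wr32(dev, 0x087ff4, nv98_pcrypt_data[i]);

            nv_wr32(dev, 0x08710c, 0x00000000);
            nv_wr32(dev, 0x087104, 0x00000000);   /* ENTRY */
            nv_wr32(dev, 0x087100, 0x00000002);   /* TRIGGER */
    }
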
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.c b/drivers/gpu/drm/nouveau/nva3_copy.c
deleted file mode 100644
index 7801cbd057fa..000000000000
--- a/drivers/gpu/drm/nouveau/nva3_copy.c
+++ /dev/null
@@ -1,203 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26#include <drm/drmP.h>
27#include "nouveau_drv.h"
28#include "nouveau_util.h"
29#include "nouveau_vm.h"
30#include "nouveau_ramht.h"
31#include "nva3_copy.fuc.h"
32
33struct nva3_copy_engine {
34 struct nouveau_exec_engine base;
35};
36
37static int
38nva3_copy_context_new(struct nouveau_channel *chan, int engine)
39{
40 struct drm_device *dev = chan->dev;
41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct nouveau_gpuobj *ramin = chan->ramin;
43 struct nouveau_gpuobj *ctx = NULL;
44 int ret;
45
46 NV_DEBUG(dev, "ch%d\n", chan->id);
47
48 ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
49 NVOBJ_FLAG_ZERO_FREE, &ctx);
50 if (ret)
51 return ret;
52
53 nv_wo32(ramin, 0xc0, 0x00190000);
54 nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1);
55 nv_wo32(ramin, 0xc8, ctx->vinst);
56 nv_wo32(ramin, 0xcc, 0x00000000);
57 nv_wo32(ramin, 0xd0, 0x00000000);
58 nv_wo32(ramin, 0xd4, 0x00000000);
59 dev_priv->engine.instmem.flush(dev);
60
61 atomic_inc(&chan->vm->engref[engine]);
62 chan->engctx[engine] = ctx;
63 return 0;
64}
65
66static int
67nva3_copy_object_new(struct nouveau_channel *chan, int engine,
68 u32 handle, u16 class)
69{
70 struct nouveau_gpuobj *ctx = chan->engctx[engine];
71
72 /* the fuc engine doesn't need an object, but our ramht code does */
73 ctx->engine = 3;
74 ctx->class = class;
75 return nouveau_ramht_insert(chan, handle, ctx);
76}
77
78static void
79nva3_copy_context_del(struct nouveau_channel *chan, int engine)
80{
81 struct nouveau_gpuobj *ctx = chan->engctx[engine];
82 int i;
83
84 for (i = 0xc0; i <= 0xd4; i += 4)
85 nv_wo32(chan->ramin, i, 0x00000000);
86
87 atomic_dec(&chan->vm->engref[engine]);
88 nouveau_gpuobj_ref(NULL, &ctx);
89 chan->engctx[engine] = ctx;
90}
91
92static void
93nva3_copy_tlb_flush(struct drm_device *dev, int engine)
94{
95 nv50_vm_flush_engine(dev, 0x0d);
96}
97
98static int
99nva3_copy_init(struct drm_device *dev, int engine)
100{
101 int i;
102
103 nv_mask(dev, 0x000200, 0x00002000, 0x00000000);
104 nv_mask(dev, 0x000200, 0x00002000, 0x00002000);
105 nv_wr32(dev, 0x104014, 0xffffffff); /* disable all interrupts */
106
107 /* upload ucode */
108 nv_wr32(dev, 0x1041c0, 0x01000000);
109 for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
110 nv_wr32(dev, 0x1041c4, nva3_pcopy_data[i]);
111
112 nv_wr32(dev, 0x104180, 0x01000000);
113 for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
114 if ((i & 0x3f) == 0)
115 nv_wr32(dev, 0x104188, i >> 6);
116 nv_wr32(dev, 0x104184, nva3_pcopy_code[i]);
117 }
118
119 /* start it running */
120 nv_wr32(dev, 0x10410c, 0x00000000);
121 nv_wr32(dev, 0x104104, 0x00000000); /* ENTRY */
122 nv_wr32(dev, 0x104100, 0x00000002); /* TRIGGER */
123 return 0;
124}
125
126static int
127nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
128{
129 nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
130 nv_wr32(dev, 0x104014, 0xffffffff);
131 return 0;
132}
133
134static struct nouveau_enum nva3_copy_isr_error_name[] = {
135 { 0x0001, "ILLEGAL_MTHD" },
136 { 0x0002, "INVALID_ENUM" },
137 { 0x0003, "INVALID_BITFIELD" },
138 {}
139};
140
141static void
142nva3_copy_isr(struct drm_device *dev)
143{
144 u32 dispatch = nv_rd32(dev, 0x10401c);
145 u32 stat = nv_rd32(dev, 0x104008) & dispatch & ~(dispatch >> 16);
146 u32 inst = nv_rd32(dev, 0x104050) & 0x3fffffff;
147 u32 ssta = nv_rd32(dev, 0x104040) & 0x0000ffff;
148 u32 addr = nv_rd32(dev, 0x104040) >> 16;
149 u32 mthd = (addr & 0x07ff) << 2;
150 u32 subc = (addr & 0x3800) >> 11;
151 u32 data = nv_rd32(dev, 0x104044);
152 int chid = nv50_graph_isr_chid(dev, inst);
153
154 if (stat & 0x00000040) {
155 NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
156 nouveau_enum_print(nva3_copy_isr_error_name, ssta);
157 printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
158 chid, inst, subc, mthd, data);
159 nv_wr32(dev, 0x104004, 0x00000040);
160 stat &= ~0x00000040;
161 }
162
163 if (stat) {
164 NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
165 nv_wr32(dev, 0x104004, stat);
166 }
167 nv50_fb_vm_trap(dev, 1);
168}
169
170static void
171nva3_copy_destroy(struct drm_device *dev, int engine)
172{
173 struct nva3_copy_engine *pcopy = nv_engine(dev, engine);
174
175 nouveau_irq_unregister(dev, 22);
176
177 NVOBJ_ENGINE_DEL(dev, COPY0);
178 kfree(pcopy);
179}
180
181int
182nva3_copy_create(struct drm_device *dev)
183{
184 struct nva3_copy_engine *pcopy;
185
186 pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
187 if (!pcopy)
188 return -ENOMEM;
189
190 pcopy->base.destroy = nva3_copy_destroy;
191 pcopy->base.init = nva3_copy_init;
192 pcopy->base.fini = nva3_copy_fini;
193 pcopy->base.context_new = nva3_copy_context_new;
194 pcopy->base.context_del = nva3_copy_context_del;
195 pcopy->base.object_new = nva3_copy_object_new;
196 pcopy->base.tlb_flush = nva3_copy_tlb_flush;
197
198 nouveau_irq_register(dev, 22, nva3_copy_isr);
199
200 NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
201 NVOBJ_CLASS(dev, 0x85b5, COPY0);
202 return 0;
203}
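
The PCOPY interrupt handler above uses the falcon dispatch-error decode shared with PCRYPT: register 0x104040 packs the failing method address into its top half, from which the method offset and subchannel fall out. In isolation (a sketch; example_decode() is illustrative):

    /* Sketch: unpack a falcon DISPATCH_ERROR status word (0x104040). */
    static void example_decode(struct drm_device *dev, u32 *mthd, u32 *subc)
    {
            u32 addr = nv_rd32(dev, 0x104040) >> 16;

            *mthd = (addr & 0x07ff) << 2;  /* method offset, in bytes */
            *subc = (addr & 0x3800) >> 11; /* subchannel index */
    }
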
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index 9258524e4c80..863f010fafeb 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -23,17 +23,24 @@
  */
 
 #include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_bios.h"
 #include "nouveau_pm.h"
 
+#include <subdev/bios/pll.h>
+#include <subdev/bios.h>
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
 static u32 read_clk(struct drm_device *, int, bool);
 static u32 read_pll(struct drm_device *, int, u32);
 
 static u32
 read_vco(struct drm_device *dev, int clk)
 {
-	u32 sctl = nv_rd32(dev, 0x4120 + (clk * 4));
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 sctl = nv_rd32(device, 0x4120 + (clk * 4));
 	if ((sctl & 0x00000030) != 0x00000030)
 		return read_pll(dev, 0x41, 0x00e820);
 	return read_pll(dev, 0x42, 0x00e8a0);
@@ -42,26 +49,27 @@ read_vco(struct drm_device *dev, int clk)
 static u32
 read_clk(struct drm_device *dev, int clk, bool ignore_en)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	u32 sctl, sdiv, sclk;
 
 	/* refclk for the 0xe8xx plls is a fixed frequency */
 	if (clk >= 0x40) {
-		if (dev_priv->chipset == 0xaf) {
+		if (nv_device(drm->device)->chipset == 0xaf) {
 			/* no joke.. seriously.. sigh.. */
-			return nv_rd32(dev, 0x00471c) * 1000;
+			return nv_rd32(device, 0x00471c) * 1000;
 		}
 
-		return dev_priv->crystal;
+		return device->crystal;
 	}
 
-	sctl = nv_rd32(dev, 0x4120 + (clk * 4));
+	sctl = nv_rd32(device, 0x4120 + (clk * 4));
 	if (!ignore_en && !(sctl & 0x00000100))
 		return 0;
 
 	switch (sctl & 0x00003000) {
 	case 0x00000000:
-		return dev_priv->crystal;
+		return device->crystal;
 	case 0x00002000:
 		if (sctl & 0x00000040)
 			return 108000;
@@ -78,12 +86,13 @@ read_clk(struct drm_device *dev, int clk, bool ignore_en)
 static u32
 read_pll(struct drm_device *dev, int clk, u32 pll)
 {
-	u32 ctrl = nv_rd32(dev, pll + 0);
+	struct nouveau_device *device = nouveau_dev(dev);
+	u32 ctrl = nv_rd32(device, pll + 0);
 	u32 sclk = 0, P = 1, N = 1, M = 1;
 
 	if (!(ctrl & 0x00000008)) {
 		if (ctrl & 0x00000001) {
-			u32 coef = nv_rd32(dev, pll + 4);
+			u32 coef = nv_rd32(device, pll + 4);
 			M = (coef & 0x000000ff) >> 0;
 			N = (coef & 0x0000ff00) >> 8;
 			P = (coef & 0x003f0000) >> 16;
@@ -111,7 +120,10 @@ struct creg {
 static int
 calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
 {
-	struct pll_lims limits;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nvbios_pll limits;
 	u32 oclk, sclk, sdiv;
 	int P, N, M, diff;
 	int ret;
@@ -119,7 +131,7 @@ calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
 	reg->pll = 0;
 	reg->clk = 0;
 	if (!khz) {
-		NV_DEBUG(dev, "no clock for 0x%04x/0x%02x\n", pll, clk);
+		NV_DEBUG(drm, "no clock for 0x%04x/0x%02x\n", pll, clk);
 		return 0;
 	}
 
@@ -154,14 +166,14 @@ calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
 		}
 
 		if (!pll) {
-			NV_ERROR(dev, "bad freq %02x: %d %d\n", clk, khz, sclk);
+			NV_ERROR(drm, "bad freq %02x: %d %d\n", clk, khz, sclk);
 			return -ERANGE;
 		}
 
 		break;
 	}
 
-	ret = get_pll_limits(dev, pll, &limits);
+	ret = nvbios_pll_parse(bios, pll, &limits);
 	if (ret)
 		return ret;
 
@@ -171,54 +183,60 @@ calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
 
 	ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
 	if (ret >= 0) {
-		reg->clk = nv_rd32(dev, 0x4120 + (clk * 4));
+		reg->clk = nv_rd32(device, 0x4120 + (clk * 4));
 		reg->pll = (P << 16) | (N << 8) | M;
 	}
+
 	return ret;
 }
 
 static void
 prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	const u32 src0 = 0x004120 + (clk * 4);
 	const u32 src1 = 0x004160 + (clk * 4);
 	const u32 ctrl = pll + 0;
 	const u32 coef = pll + 4;
 
 	if (!reg->clk && !reg->pll) {
-		NV_DEBUG(dev, "no clock for %02x\n", clk);
+		NV_DEBUG(drm, "no clock for %02x\n", clk);
 		return;
 	}
 
 	if (reg->pll) {
-		nv_mask(dev, src0, 0x00000101, 0x00000101);
-		nv_wr32(dev, coef, reg->pll);
-		nv_mask(dev, ctrl, 0x00000015, 0x00000015);
-		nv_mask(dev, ctrl, 0x00000010, 0x00000000);
-		nv_wait(dev, ctrl, 0x00020000, 0x00020000);
-		nv_mask(dev, ctrl, 0x00000010, 0x00000010);
-		nv_mask(dev, ctrl, 0x00000008, 0x00000000);
-		nv_mask(dev, src1, 0x00000100, 0x00000000);
-		nv_mask(dev, src1, 0x00000001, 0x00000000);
+		nv_mask(device, src0, 0x00000101, 0x00000101);
+		nv_wr32(device, coef, reg->pll);
+		nv_mask(device, ctrl, 0x00000015, 0x00000015);
+		nv_mask(device, ctrl, 0x00000010, 0x00000000);
+		nv_wait(device, ctrl, 0x00020000, 0x00020000);
+		nv_mask(device, ctrl, 0x00000010, 0x00000010);
+		nv_mask(device, ctrl, 0x00000008, 0x00000000);
+		nv_mask(device, src1, 0x00000100, 0x00000000);
+		nv_mask(device, src1, 0x00000001, 0x00000000);
 	} else {
-		nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk);
-		nv_mask(dev, ctrl, 0x00000018, 0x00000018);
+		nv_mask(device, src1, 0x003f3141, 0x00000101 | reg->clk);
+		nv_mask(device, ctrl, 0x00000018, 0x00000018);
 		udelay(20);
-		nv_mask(dev, ctrl, 0x00000001, 0x00000000);
-		nv_mask(dev, src0, 0x00000100, 0x00000000);
-		nv_mask(dev, src0, 0x00000001, 0x00000000);
+		nv_mask(device, ctrl, 0x00000001, 0x00000000);
+		nv_mask(device, src0, 0x00000100, 0x00000000);
+		nv_mask(device, src0, 0x00000001, 0x00000000);
 	}
 }
 
 static void
 prog_clk(struct drm_device *dev, int clk, struct creg *reg)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
 	if (!reg->clk) {
-		NV_DEBUG(dev, "no clock for %02x\n", clk);
+		NV_DEBUG(drm, "no clock for %02x\n", clk);
 		return;
 	}
 
-	nv_mask(dev, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
+	nv_mask(device, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
 }
 
 int
@@ -309,10 +327,11 @@ static bool
 nva3_pm_grcp_idle(void *data)
 {
 	struct drm_device *dev = data;
+	struct nouveau_device *device = nouveau_dev(dev);
 
-	if (!(nv_rd32(dev, 0x400304) & 0x00000001))
+	if (!(nv_rd32(device, 0x400304) & 0x00000001))
 		return true;
-	if (nv_rd32(dev, 0x400308) == 0x0050001c)
+	if (nv_rd32(device, 0x400308) == 0x0050001c)
 		return true;
 	return false;
 }
@@ -320,85 +339,91 @@ nva3_pm_grcp_idle(void *data)
 static void
 mclk_precharge(struct nouveau_mem_exec_func *exec)
 {
-	nv_wr32(exec->dev, 0x1002d4, 0x00000001);
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x1002d4, 0x00000001);
 }
 
 static void
 mclk_refresh(struct nouveau_mem_exec_func *exec)
 {
-	nv_wr32(exec->dev, 0x1002d0, 0x00000001);
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x1002d0, 0x00000001);
 }
 
 static void
 mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
 {
-	nv_wr32(exec->dev, 0x100210, enable ? 0x80000000 : 0x00000000);
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x100210, enable ? 0x80000000 : 0x00000000);
 }
 
 static void
 mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
 {
-	nv_wr32(exec->dev, 0x1002dc, enable ? 0x00000001 : 0x00000000);
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x1002dc, enable ? 0x00000001 : 0x00000000);
 }
 
 static void
 mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
 {
-	volatile u32 post = nv_rd32(exec->dev, 0); (void)post;
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	volatile u32 post = nv_rd32(device, 0); (void)post;
 	udelay((nsec + 500) / 1000);
 }
 
 static u32
 mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
 {
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	if (mr <= 1)
-		return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4));
+		return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
 	if (mr <= 3)
-		return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4));
+		return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
 	return 0;
 }
 
 static void
 mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
 {
-	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
-
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
 	if (mr <= 1) {
-		if (dev_priv->vram_rank_B)
-			nv_wr32(exec->dev, 0x1002c8 + ((mr - 0) * 4), data);
-		nv_wr32(exec->dev, 0x1002c0 + ((mr - 0) * 4), data);
+		if (pfb->ram.ranks > 1)
+			nv_wr32(device, 0x1002c8 + ((mr - 0) * 4), data);
+		nv_wr32(device, 0x1002c0 + ((mr - 0) * 4), data);
 	} else
 	if (mr <= 3) {
-		if (dev_priv->vram_rank_B)
-			nv_wr32(exec->dev, 0x1002e8 + ((mr - 2) * 4), data);
-		nv_wr32(exec->dev, 0x1002e0 + ((mr - 2) * 4), data);
+		if (pfb->ram.ranks > 1)
+			nv_wr32(device, 0x1002e8 + ((mr - 2) * 4), data);
+		nv_wr32(device, 0x1002e0 + ((mr - 2) * 4), data);
 	}
 }
 
 static void
 mclk_clock_set(struct nouveau_mem_exec_func *exec)
 {
-	struct drm_device *dev = exec->dev;
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	struct nva3_pm_state *info = exec->priv;
 	u32 ctrl;
 
-	ctrl = nv_rd32(dev, 0x004000);
+	ctrl = nv_rd32(device, 0x004000);
 	if (!(ctrl & 0x00000008) && info->mclk.pll) {
-		nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
-		nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
-		nv_wr32(dev, 0x004018, 0x00001000);
-		nv_wr32(dev, 0x004000, (ctrl &= ~0x00000001));
-		nv_wr32(dev, 0x004004, info->mclk.pll);
-		nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
+		nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
+		nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
+		nv_wr32(device, 0x004018, 0x00001000);
+		nv_wr32(device, 0x004000, (ctrl &= ~0x00000001));
+		nv_wr32(device, 0x004004, info->mclk.pll);
+		nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
 		udelay(64);
-		nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
+		nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
 		udelay(20);
 	} else
 	if (!info->mclk.pll) {
-		nv_mask(dev, 0x004168, 0x003f3040, info->mclk.clk);
-		nv_wr32(dev, 0x004000, (ctrl |= 0x00000008));
-		nv_mask(dev, 0x1110e0, 0x00088000, 0x00088000);
-		nv_wr32(dev, 0x004018, 0x0000d000 | info->r004018);
+		nv_mask(device, 0x004168, 0x003f3040, info->mclk.clk);
+		nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
+		nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
+		nv_wr32(device, 0x004018, 0x0000d000 | info->r004018);
 	}
 
 	if (info->rammap) {
@@ -410,67 +435,68 @@ mclk_clock_set(struct nouveau_mem_exec_func *exec)
 				     (info->ramcfg[3] & 0x0f) << 16 |
 				     (info->ramcfg[9] & 0x0f) |
 				     0x80000000;
-			nv_wr32(dev, 0x1005a0, unk5a0);
-			nv_wr32(dev, 0x1005a4, unk5a4);
-			nv_wr32(dev, 0x10f804, unk804);
-			nv_mask(dev, 0x10053c, 0x00001000, 0x00000000);
+			nv_wr32(device, 0x1005a0, unk5a0);
+			nv_wr32(device, 0x1005a4, unk5a4);
+			nv_wr32(device, 0x10f804, unk804);
+			nv_mask(device, 0x10053c, 0x00001000, 0x00000000);
 		} else {
-			nv_mask(dev, 0x10053c, 0x00001000, 0x00001000);
-			nv_mask(dev, 0x10f804, 0x80000000, 0x00000000);
-			nv_mask(dev, 0x100760, 0x22222222, info->r100760);
-			nv_mask(dev, 0x1007a0, 0x22222222, info->r100760);
-			nv_mask(dev, 0x1007e0, 0x22222222, info->r100760);
+			nv_mask(device, 0x10053c, 0x00001000, 0x00001000);
+			nv_mask(device, 0x10f804, 0x80000000, 0x00000000);
+			nv_mask(device, 0x100760, 0x22222222, info->r100760);
+			nv_mask(device, 0x1007a0, 0x22222222, info->r100760);
+			nv_mask(device, 0x1007e0, 0x22222222, info->r100760);
 		}
 	}
 
 	if (info->mclk.pll) {
-		nv_mask(dev, 0x1110e0, 0x00088000, 0x00011000);
-		nv_wr32(dev, 0x004000, (ctrl &= ~0x00000008));
+		nv_mask(device, 0x1110e0, 0x00088000, 0x00011000);
+		nv_wr32(device, 0x004000, (ctrl &= ~0x00000008));
 	}
 }
 
 static void
 mclk_timing_set(struct nouveau_mem_exec_func *exec)
 {
-	struct drm_device *dev = exec->dev;
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	struct nva3_pm_state *info = exec->priv;
 	struct nouveau_pm_level *perflvl = info->perflvl;
 	int i;
 
 	for (i = 0; i < 9; i++)
-		nv_wr32(dev, 0x100220 + (i * 4), perflvl->timing.reg[i]);
+		nv_wr32(device, 0x100220 + (i * 4), perflvl->timing.reg[i]);
 
 	if (info->ramcfg) {
 		u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
-		nv_mask(dev, 0x100200, 0x00001000, data);
+		nv_mask(device, 0x100200, 0x00001000, data);
 	}
 
 	if (info->ramcfg) {
-		u32 unk714 = nv_rd32(dev, 0x100714) & ~0xf0000010;
-		u32 unk718 = nv_rd32(dev, 0x100718) & ~0x00000100;
-		u32 unk71c = nv_rd32(dev, 0x10071c) & ~0x00000100;
+		u32 unk714 = nv_rd32(device, 0x100714) & ~0xf0000010;
+		u32 unk718 = nv_rd32(device, 0x100718) & ~0x00000100;
+		u32 unk71c = nv_rd32(device, 0x10071c) & ~0x00000100;
 		if ( (info->ramcfg[2] & 0x20))
 			unk714 |= 0xf0000000;
 		if (!(info->ramcfg[2] & 0x04))
 			unk714 |= 0x00000010;
-		nv_wr32(dev, 0x100714, unk714);
+		nv_wr32(device, 0x100714, unk714);
 
 		if (info->ramcfg[2] & 0x01)
 			unk71c |= 0x00000100;
-		nv_wr32(dev, 0x10071c, unk71c);
+		nv_wr32(device, 0x10071c, unk71c);
 
 		if (info->ramcfg[2] & 0x02)
 			unk718 |= 0x00000100;
-		nv_wr32(dev, 0x100718, unk718);
+		nv_wr32(device, 0x100718, unk718);
 
 		if (info->ramcfg[2] & 0x10)
-			nv_wr32(dev, 0x111100, 0x48000000); /*XXX*/
+			nv_wr32(device, 0x111100, 0x48000000); /*XXX*/
 	}
 }
 
 static void
 prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nouveau_mem_exec_func exec = {
 		.dev = dev,
 		.precharge = mclk_precharge,
@@ -492,17 +518,17 @@ prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
 		info->r100760 = 0x22222222;
 	}
 
-	ctrl = nv_rd32(dev, 0x004000);
+	ctrl = nv_rd32(device, 0x004000);
 	if (ctrl & 0x00000008) {
 		if (info->mclk.pll) {
-			nv_mask(dev, 0x004128, 0x00000101, 0x00000101);
-			nv_wr32(dev, 0x004004, info->mclk.pll);
-			nv_wr32(dev, 0x004000, (ctrl |= 0x00000001));
-			nv_wr32(dev, 0x004000, (ctrl &= 0xffffffef));
-			nv_wait(dev, 0x004000, 0x00020000, 0x00020000);
-			nv_wr32(dev, 0x004000, (ctrl |= 0x00000010));
-			nv_wr32(dev, 0x004018, 0x00005000 | info->r004018);
-			nv_wr32(dev, 0x004000, (ctrl |= 0x00000004));
+			nv_mask(device, 0x004128, 0x00000101, 0x00000101);
+			nv_wr32(device, 0x004004, info->mclk.pll);
+			nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
+			nv_wr32(device, 0x004000, (ctrl &= 0xffffffef));
+			nv_wait(device, 0x004000, 0x00020000, 0x00020000);
+			nv_wr32(device, 0x004000, (ctrl |= 0x00000010));
+			nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
+			nv_wr32(device, 0x004000, (ctrl |= 0x00000004));
 		}
 	} else {
 		u32 ssel = 0x00000101;
@@ -510,68 +536,67 @@ prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
 			ssel |= info->mclk.clk;
 		else
 			ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
-		nv_mask(dev, 0x004168, 0x003f3141, ctrl);
+		nv_mask(device, 0x004168, 0x003f3141, ctrl);
 	}
 
 	if (info->ramcfg) {
 		if (info->ramcfg[2] & 0x10) {
-			nv_mask(dev, 0x111104, 0x00000600, 0x00000000);
+			nv_mask(device, 0x111104, 0x00000600, 0x00000000);
 		} else {
-			nv_mask(dev, 0x111100, 0x40000000, 0x40000000);
-			nv_mask(dev, 0x111104, 0x00000180, 0x00000000);
+			nv_mask(device, 0x111100, 0x40000000, 0x40000000);
+			nv_mask(device, 0x111104, 0x00000180, 0x00000000);
 		}
 	}
 	if (info->rammap && !(info->rammap[4] & 0x02))
-		nv_mask(dev, 0x100200, 0x00000800, 0x00000000);
-	nv_wr32(dev, 0x611200, 0x00003300);
+		nv_mask(device, 0x100200, 0x00000800, 0x00000000);
+	nv_wr32(device, 0x611200, 0x00003300);
 	if (!(info->ramcfg[2] & 0x10))
-		nv_wr32(dev, 0x111100, 0x4c020000); /*XXX*/
+		nv_wr32(device, 0x111100, 0x4c020000); /*XXX*/
 
 	nouveau_mem_exec(&exec, info->perflvl);
 
-	nv_wr32(dev, 0x611200, 0x00003330);
+	nv_wr32(device, 0x611200, 0x00003330);
 	if (info->rammap && (info->rammap[4] & 0x02))
-		nv_mask(dev, 0x100200, 0x00000800, 0x00000800);
+		nv_mask(device, 0x100200, 0x00000800, 0x00000800);
 	if (info->ramcfg) {
 		if (info->ramcfg[2] & 0x10) {
-			nv_mask(dev, 0x111104, 0x00000180, 0x00000180);
-			nv_mask(dev, 0x111100, 0x40000000, 0x00000000);
+			nv_mask(device, 0x111104, 0x00000180, 0x00000180);
+			nv_mask(device, 0x111100, 0x40000000, 0x00000000);
 		} else {
-			nv_mask(dev, 0x111104, 0x00000600, 0x00000600);
+			nv_mask(device, 0x111104, 0x00000600, 0x00000600);
 		}
 	}
 
 	if (info->mclk.pll) {
-		nv_mask(dev, 0x004168, 0x00000001, 0x00000000);
-		nv_mask(dev, 0x004168, 0x00000100, 0x00000000);
+		nv_mask(device, 0x004168, 0x00000001, 0x00000000);
+		nv_mask(device, 0x004168, 0x00000100, 0x00000000);
 	} else {
-		nv_mask(dev, 0x004000, 0x00000001, 0x00000000);
-		nv_mask(dev, 0x004128, 0x00000001, 0x00000000);
-		nv_mask(dev, 0x004128, 0x00000100, 0x00000000);
+		nv_mask(device, 0x004000, 0x00000001, 0x00000000);
+		nv_mask(device, 0x004128, 0x00000001, 0x00000000);
+		nv_mask(device, 0x004128, 0x00000100, 0x00000000);
 	}
 }
 
 int
 nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nva3_pm_state *info = pre_state;
-	unsigned long flags;
 	int ret = -EAGAIN;
 
 	/* prevent any new grctx switches from starting */
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv_wr32(dev, 0x400324, 0x00000000);
-	nv_wr32(dev, 0x400328, 0x0050001c); /* wait flag 0x1c */
+	nv_wr32(device, 0x400324, 0x00000000);
+	nv_wr32(device, 0x400328, 0x0050001c); /* wait flag 0x1c */
 	/* wait for any pending grctx switches to complete */
-	if (!nv_wait_cb(dev, nva3_pm_grcp_idle, dev)) {
-		NV_ERROR(dev, "pm: ctxprog didn't go idle\n");
+	if (!nv_wait_cb(device, nva3_pm_grcp_idle, dev)) {
+		NV_ERROR(drm, "pm: ctxprog didn't go idle\n");
 		goto cleanup;
 	}
 	/* freeze PFIFO */
-	nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
-	if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010)) {
-		NV_ERROR(dev, "pm: fifo didn't go idle\n");
+	nv_mask(device, 0x002504, 0x00000001, 0x00000001);
+	if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010)) {
+		NV_ERROR(drm, "pm: fifo didn't go idle\n");
 		goto cleanup;
 	}
 
@@ -587,14 +612,13 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
 
 cleanup:
 	/* unfreeze PFIFO */
-	nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
+	nv_mask(device, 0x002504, 0x00000001, 0x00000000);
 	/* restore ctxprog to normal */
-	nv_wr32(dev, 0x400324, 0x00000000);
-	nv_wr32(dev, 0x400328, 0x0070009c); /* set flag 0x1c */
+	nv_wr32(device, 0x400324, 0x00000000);
+	nv_wr32(device, 0x400328, 0x0070009c); /* set flag 0x1c */
 	/* unblock it if necessary */
-	if (nv_rd32(dev, 0x400308) == 0x0050001c)
-		nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+	if (nv_rd32(device, 0x400308) == 0x0050001c)
+		nv_mask(device, 0x400824, 0x10000000, 0x10000000);
 	kfree(info);
 	return ret;
 }
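
Every hunk in nva3_pm.c above applies the same mechanical conversion; a minimal sketch of the pattern, assuming only the accessors visible in the diff (the function name is hypothetical):

/* Resolve the core device and drm wrappers from the drm_device once,
 * then hand the device object to register accessors and the drm object
 * to the log macros, instead of passing the drm_device to both.
 */
static u32 sketch_read_reg(struct drm_device *dev)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	u32 val = nv_rd32(device, 0x004000);	/* was nv_rd32(dev, ...) */

	NV_DEBUG(drm, "0x004000 = 0x%08x\n", val);	/* was NV_DEBUG(dev, ...) */
	return val;
}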
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.c b/drivers/gpu/drm/nouveau/nvc0_copy.c
deleted file mode 100644
index 88a922d60822..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_copy.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/firmware.h>
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-#include "nvc0_copy.fuc.h"
-
-struct nvc0_copy_engine {
-	struct nouveau_exec_engine base;
-	u32 irq;
-	u32 pmc;
-	u32 fuc;
-	u32 ctx;
-};
-
-static int
-nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramin = chan->ramin;
-	struct nouveau_gpuobj *ctx = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, chan, 256, 256,
-				 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
-				 NVOBJ_FLAG_ZERO_ALLOC, &ctx);
-	if (ret)
-		return ret;
-
-	nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
-	nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
-	dev_priv->engine.instmem.flush(dev);
-
-	chan->engctx[engine] = ctx;
-	return 0;
-}
-
-static int
-nvc0_copy_object_new(struct nouveau_channel *chan, int engine,
-		     u32 handle, u16 class)
-{
-	return 0;
-}
-
-static void
-nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
-	struct nouveau_gpuobj *ctx = chan->engctx[engine];
-	struct drm_device *dev = chan->dev;
-	u32 inst;
-
-	inst  = (chan->ramin->vinst >> 12);
-	inst |= 0x40000000;
-
-	/* disable fifo access */
-	nv_wr32(dev, pcopy->fuc + 0x048, 0x00000000);
-	/* mark channel as unloaded if it's currently active */
-	if (nv_rd32(dev, pcopy->fuc + 0x050) == inst)
-		nv_mask(dev, pcopy->fuc + 0x050, 0x40000000, 0x00000000);
-	/* mark next channel as invalid if it's about to be loaded */
-	if (nv_rd32(dev, pcopy->fuc + 0x054) == inst)
-		nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
-	/* restore fifo access */
-	nv_wr32(dev, pcopy->fuc + 0x048, 0x00000003);
-
-	nv_wo32(chan->ramin, pcopy->ctx + 0, 0x00000000);
-	nv_wo32(chan->ramin, pcopy->ctx + 4, 0x00000000);
-	nouveau_gpuobj_ref(NULL, &ctx);
-
-	chan->engctx[engine] = ctx;
-}
-
-static int
-nvc0_copy_init(struct drm_device *dev, int engine)
-{
-	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
-	int i;
-
-	nv_mask(dev, 0x000200, pcopy->pmc, 0x00000000);
-	nv_mask(dev, 0x000200, pcopy->pmc, pcopy->pmc);
-	nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
-
-	nv_wr32(dev, pcopy->fuc + 0x1c0, 0x01000000);
-	for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
-		nv_wr32(dev, pcopy->fuc + 0x1c4, nvc0_pcopy_data[i]);
-
-	nv_wr32(dev, pcopy->fuc + 0x180, 0x01000000);
-	for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
-		if ((i & 0x3f) == 0)
-			nv_wr32(dev, pcopy->fuc + 0x188, i >> 6);
-		nv_wr32(dev, pcopy->fuc + 0x184, nvc0_pcopy_code[i]);
-	}
-
-	nv_wr32(dev, pcopy->fuc + 0x084, engine - NVOBJ_ENGINE_COPY0);
-	nv_wr32(dev, pcopy->fuc + 0x10c, 0x00000000);
-	nv_wr32(dev, pcopy->fuc + 0x104, 0x00000000); /* ENTRY */
-	nv_wr32(dev, pcopy->fuc + 0x100, 0x00000002); /* TRIGGER */
-	return 0;
-}
-
-static int
-nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
-
-	nv_mask(dev, pcopy->fuc + 0x048, 0x00000003, 0x00000000);
-
-	/* trigger fuc context unload */
-	nv_wait(dev, pcopy->fuc + 0x008, 0x0000000c, 0x00000000);
-	nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
-	nv_wr32(dev, pcopy->fuc + 0x000, 0x00000008);
-	nv_wait(dev, pcopy->fuc + 0x008, 0x00000008, 0x00000000);
-
-	nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
-	return 0;
-}
-
-static struct nouveau_enum nvc0_copy_isr_error_name[] = {
-	{ 0x0001, "ILLEGAL_MTHD" },
-	{ 0x0002, "INVALID_ENUM" },
-	{ 0x0003, "INVALID_BITFIELD" },
-	{}
-};
-
-static void
-nvc0_copy_isr(struct drm_device *dev, int engine)
-{
-	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
-	u32 disp = nv_rd32(dev, pcopy->fuc + 0x01c);
-	u32 stat = nv_rd32(dev, pcopy->fuc + 0x008) & disp & ~(disp >> 16);
-	u64 inst = (u64)(nv_rd32(dev, pcopy->fuc + 0x050) & 0x0fffffff) << 12;
-	u32 chid = nvc0_graph_isr_chid(dev, inst);
-	u32 ssta = nv_rd32(dev, pcopy->fuc + 0x040) & 0x0000ffff;
-	u32 addr = nv_rd32(dev, pcopy->fuc + 0x040) >> 16;
-	u32 mthd = (addr & 0x07ff) << 2;
-	u32 subc = (addr & 0x3800) >> 11;
-	u32 data = nv_rd32(dev, pcopy->fuc + 0x044);
-
-	if (stat & 0x00000040) {
-		NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
-		nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
-		printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
-			chid, inst, subc, mthd, data);
-		nv_wr32(dev, pcopy->fuc + 0x004, 0x00000040);
-		stat &= ~0x00000040;
-	}
-
-	if (stat) {
-		NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
-		nv_wr32(dev, pcopy->fuc + 0x004, stat);
-	}
-}
-
-static void
-nvc0_copy_isr_0(struct drm_device *dev)
-{
-	nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY0);
-}
-
-static void
-nvc0_copy_isr_1(struct drm_device *dev)
-{
-	nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY1);
-}
-
-static void
-nvc0_copy_destroy(struct drm_device *dev, int engine)
-{
-	struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
-
-	nouveau_irq_unregister(dev, pcopy->irq);
-
-	if (engine == NVOBJ_ENGINE_COPY0)
-		NVOBJ_ENGINE_DEL(dev, COPY0);
-	else
-		NVOBJ_ENGINE_DEL(dev, COPY1);
-	kfree(pcopy);
-}
-
-int
-nvc0_copy_create(struct drm_device *dev, int engine)
-{
-	struct nvc0_copy_engine *pcopy;
-
-	pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
-	if (!pcopy)
-		return -ENOMEM;
-
-	pcopy->base.destroy = nvc0_copy_destroy;
-	pcopy->base.init = nvc0_copy_init;
-	pcopy->base.fini = nvc0_copy_fini;
-	pcopy->base.context_new = nvc0_copy_context_new;
-	pcopy->base.context_del = nvc0_copy_context_del;
-	pcopy->base.object_new = nvc0_copy_object_new;
-
-	if (engine == 0) {
-		pcopy->irq = 5;
-		pcopy->pmc = 0x00000040;
-		pcopy->fuc = 0x104000;
-		pcopy->ctx = 0x0230;
-		nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_0);
-		NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
-		NVOBJ_CLASS(dev, 0x90b5, COPY0);
-	} else {
-		pcopy->irq = 6;
-		pcopy->pmc = 0x00000080;
-		pcopy->fuc = 0x105000;
-		pcopy->ctx = 0x0240;
-		nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_1);
-		NVOBJ_ENGINE_ADD(dev, COPY1, &pcopy->base);
-		NVOBJ_CLASS(dev, 0x90b8, COPY1);
-	}
-
-	return 0;
-}
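
The deleted file drove both copy engines from one implementation, differing only in the constants set up by nvc0_copy_create(); a minimal sketch of that parameterization, using the values above (the struct and array names are hypothetical):

/* Per-engine constants from nvc0_copy_create(): the two PCOPY
 * instances differ only in IRQ line, PMC enable bit, falcon register
 * base, context offset in ramin, and exposed object class.
 */
struct copy_params {
	int irq;	/* nouveau_irq_register() line */
	u32 pmc;	/* bit in the 0x000200 master enable */
	u32 fuc;	/* falcon register base */
	u32 ctx;	/* context pointer offset */
	u16 class;	/* object class */
};

static const struct copy_params params[2] = {
	{ 5, 0x00000040, 0x104000, 0x0230, 0x90b5 },	/* COPY0 */
	{ 6, 0x00000080, 0x105000, 0x0240, 0x90b8 },	/* COPY1 */
};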
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
deleted file mode 100644
index 7da32a9ef08e..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include <drm/nouveau_drm.h>
-
-struct nvc0_fb_priv {
-	struct page *r100c10_page;
-	dma_addr_t r100c10;
-};
-
-static inline void
-nvc0_mfb_subp_isr(struct drm_device *dev, int unit, int subp)
-{
-	u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400);
-	u32 stat = nv_rd32(dev, subp_base + 0x020);
-
-	if (stat) {
-		NV_INFO(dev, "PMFB%d_SUBP%d: 0x%08x\n", unit, subp, stat);
-		nv_wr32(dev, subp_base + 0x020, stat);
-	}
-}
-
-static void
-nvc0_mfb_isr(struct drm_device *dev)
-{
-	u32 units = nv_rd32(dev, 0x00017c);
-	while (units) {
-		u32 subp, unit = ffs(units) - 1;
-		for (subp = 0; subp < 2; subp++)
-			nvc0_mfb_subp_isr(dev, unit, subp);
-		units &= ~(1 << unit);
-	}
-
-	/* we do something horribly wrong and upset PMFB a lot, so mask off
-	 * interrupts from it after the first one until it's fixed
-	 */
-	nv_mask(dev, 0x000640, 0x02000000, 0x00000000);
-}
-
-static void
-nvc0_fb_destroy(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nvc0_fb_priv *priv = pfb->priv;
-
-	nouveau_irq_unregister(dev, 25);
-
-	if (priv->r100c10_page) {
-		pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE,
-			       PCI_DMA_BIDIRECTIONAL);
-		__free_page(priv->r100c10_page);
-	}
-
-	kfree(priv);
-	pfb->priv = NULL;
-}
-
-static int
-nvc0_fb_create(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nvc0_fb_priv *priv;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-	pfb->priv = priv;
-
-	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (!priv->r100c10_page) {
-		nvc0_fb_destroy(dev);
-		return -ENOMEM;
-	}
-
-	priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0,
-				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) {
-		nvc0_fb_destroy(dev);
-		return -EFAULT;
-	}
-
-	nouveau_irq_register(dev, 25, nvc0_mfb_isr);
-	return 0;
-}
-
-int
-nvc0_fb_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_fb_priv *priv;
-	int ret;
-
-	if (!dev_priv->engine.fb.priv) {
-		ret = nvc0_fb_create(dev);
-		if (ret)
-			return ret;
-	}
-	priv = dev_priv->engine.fb.priv;
-
-	nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
-	nv_mask(dev, 0x17e820, 0x00100000, 0x00000000); /* NV_PLTCG_INTR_EN */
-	return 0;
-}
-
-void
-nvc0_fb_takedown(struct drm_device *dev)
-{
-	nvc0_fb_destroy(dev);
-}
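
The deleted MFB handler walks a unit bitmask and polls one status register per (unit, subpartition) pair; a minimal sketch of the address math from nvc0_mfb_subp_isr() above (the helper name is hypothetical):

/* PMFB status register: base 0x141000, stride 0x2000 per unit and
 * 0x400 per subpartition, status word at offset 0x020.
 */
static u32 mfb_subp_stat_reg(int unit, int subp)
{
	return 0x141000 + (unit * 0x2000) + (subp * 0x400) + 0x020;
}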
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index ade005fa9de1..9dcd30f3e1e0 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -22,20 +22,16 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
-#include "nouveau_mm.h"
 
 int
 nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
@@ -69,9 +65,8 @@ int
 nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	int ret;
 
 	ret = RING_SPACE(chan, 12);
@@ -98,9 +93,8 @@ int
 nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
-	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+	struct nouveau_channel *chan = drm->channel;
 	uint32_t width, dwords, *data = (uint32_t *)image->data;
 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
 	uint32_t *palette = info->pseudo_palette;
@@ -157,12 +151,14 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->channel;
 	struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_channel *chan = drm->channel;
+	struct nouveau_object *object;
 	int ret, format;
 
-	ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
+	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
+				 0x902d, NULL, 0, &object);
 	if (ret)
 		return ret;
 
@@ -202,9 +198,6 @@ nvc0_fbcon_accel_init(struct fb_info *info)
 
 	BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
 	OUT_RING  (chan, 0x0000902d);
-	BEGIN_NVC0(chan, NvSub2D, 0x0104, 2);
-	OUT_RING  (chan, upper_32_bits(chan->notifier_vma.offset));
-	OUT_RING  (chan, lower_32_bits(chan->notifier_vma.offset));
 	BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
 	OUT_RING  (chan, 0);
 	BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
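
The accel_init hunks above allocate the 0x902d 2D class through the new object interface and then bind it to the 2D subchannel; a minimal sketch of that sequence, using only calls visible in the diff (error handling elided):

	struct nouveau_object *object;
	int ret;

	/* allocate a 0x902d 2D object on the channel */
	ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
				 0x902d, NULL, 0, &object);

	/* bind it to the NvSub2D subchannel via method 0x0000 */
	BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
	OUT_RING  (chan, 0x0000902d);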
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 2e666d0c4048..53299eac9676 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -22,29 +22,44 @@
  * Authors: Ben Skeggs
  */
 
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
+#include <core/object.h>
+#include <core/client.h>
+#include <core/class.h>
+
+#include <engine/fifo.h>
+
+#include "nouveau_drm.h"
 #include "nouveau_dma.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
 #include "nouveau_fence.h"
 
+#include "nv50_display.h"
+
 struct nvc0_fence_priv {
 	struct nouveau_fence_priv base;
 	struct nouveau_bo *bo;
+	u32 *suspend;
 };
 
 struct nvc0_fence_chan {
 	struct nouveau_fence_chan base;
 	struct nouveau_vma vma;
+	struct nouveau_vma dispc_vma[4];
 };
 
+u64
+nvc0_fence_crtc(struct nouveau_channel *chan, int crtc)
+{
+	struct nvc0_fence_chan *fctx = chan->fence;
+	return fctx->dispc_vma[crtc].offset;
+}
+
 static int
 nvc0_fence_emit(struct nouveau_fence *fence)
 {
 	struct nouveau_channel *chan = fence->channel;
-	struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
-	u64 addr = fctx->vma.offset + chan->id * 16;
+	struct nvc0_fence_chan *fctx = chan->fence;
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	u64 addr = fctx->vma.offset + fifo->chid * 16;
 	int ret;
 
 	ret = RING_SPACE(chan, 5);
@@ -64,8 +79,9 @@ static int
 nvc0_fence_sync(struct nouveau_fence *fence,
 		struct nouveau_channel *prev, struct nouveau_channel *chan)
 {
-	struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
-	u64 addr = fctx->vma.offset + prev->id * 16;
+	struct nvc0_fence_chan *fctx = chan->fence;
+	struct nouveau_fifo_chan *fifo = (void *)prev->object;
+	u64 addr = fctx->vma.offset + fifo->chid * 16;
 	int ret;
 
 	ret = RING_SPACE(chan, 5);
@@ -85,91 +101,135 @@ nvc0_fence_sync(struct nouveau_fence *fence,
 static u32
 nvc0_fence_read(struct nouveau_channel *chan)
 {
-	struct nvc0_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
-	return nouveau_bo_rd32(priv->bo, chan->id * 16/4);
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	struct nvc0_fence_priv *priv = chan->drm->fence;
+	return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
 }
 
 static void
-nvc0_fence_context_del(struct nouveau_channel *chan, int engine)
+nvc0_fence_context_del(struct nouveau_channel *chan)
 {
-	struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
-	struct nvc0_fence_chan *fctx = chan->engctx[engine];
+	struct drm_device *dev = chan->drm->dev;
+	struct nvc0_fence_priv *priv = chan->drm->fence;
+	struct nvc0_fence_chan *fctx = chan->fence;
+	int i;
+
+	if (nv_device(chan->drm->device)->card_type >= NV_D0) {
+		for (i = 0; i < dev->mode_config.num_crtc; i++) {
+			struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
+			nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
+		}
+	} else
+	if (nv_device(chan->drm->device)->card_type >= NV_50) {
+		for (i = 0; i < dev->mode_config.num_crtc; i++) {
+			struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+			nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
+		}
+	}
 
 	nouveau_bo_vma_del(priv->bo, &fctx->vma);
 	nouveau_fence_context_del(&fctx->base);
-	chan->engctx[engine] = NULL;
+	chan->fence = NULL;
 	kfree(fctx);
 }
 
 static int
-nvc0_fence_context_new(struct nouveau_channel *chan, int engine)
+nvc0_fence_context_new(struct nouveau_channel *chan)
 {
-	struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
+	struct nouveau_fifo_chan *fifo = (void *)chan->object;
+	struct nouveau_client *client = nouveau_client(fifo);
+	struct nvc0_fence_priv *priv = chan->drm->fence;
 	struct nvc0_fence_chan *fctx;
-	int ret;
+	int ret, i;
 
-	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
 	if (!fctx)
 		return -ENOMEM;
 
 	nouveau_fence_context_new(&fctx->base);
 
-	ret = nouveau_bo_vma_add(priv->bo, chan->vm, &fctx->vma);
+	ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
 	if (ret)
-		nvc0_fence_context_del(chan, engine);
+		nvc0_fence_context_del(chan);
+
+	/* map display semaphore buffers into channel's vm */
+	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo;
+		if (nv_device(chan->drm->device)->card_type >= NV_D0)
+			bo = nvd0_display_crtc_sema(chan->drm->dev, i);
+		else
+			bo = nv50_display_crtc_sema(chan->drm->dev, i);
 
-	nouveau_bo_wr32(priv->bo, chan->id * 16/4, 0x00000000);
+		ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
+	}
+
+	nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
 	return ret;
 }
 
-static int
-nvc0_fence_fini(struct drm_device *dev, int engine, bool suspend)
+static bool
+nvc0_fence_suspend(struct nouveau_drm *drm)
 {
-	return 0;
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+	struct nvc0_fence_priv *priv = drm->fence;
+	int i;
+
+	priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
+	if (priv->suspend) {
+		for (i = 0; i <= pfifo->max; i++)
+			priv->suspend[i] = nouveau_bo_rd32(priv->bo, i);
+	}
+
+	return priv->suspend != NULL;
 }
 
-static int
-nvc0_fence_init(struct drm_device *dev, int engine)
+static void
+nvc0_fence_resume(struct nouveau_drm *drm)
 {
-	return 0;
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+	struct nvc0_fence_priv *priv = drm->fence;
+	int i;
+
+	if (priv->suspend) {
+		for (i = 0; i <= pfifo->max; i++)
+			nouveau_bo_wr32(priv->bo, i, priv->suspend[i]);
+		vfree(priv->suspend);
+		priv->suspend = NULL;
+	}
 }
 
 static void
-nvc0_fence_destroy(struct drm_device *dev, int engine)
+nvc0_fence_destroy(struct nouveau_drm *drm)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_fence_priv *priv = nv_engine(dev, engine);
-
+	struct nvc0_fence_priv *priv = drm->fence;
 	nouveau_bo_unmap(priv->bo);
 	nouveau_bo_ref(NULL, &priv->bo);
-	dev_priv->eng[engine] = NULL;
+	drm->fence = NULL;
 	kfree(priv);
 }
 
 int
-nvc0_fence_create(struct drm_device *dev)
+nvc0_fence_create(struct nouveau_drm *drm)
 {
-	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
 	struct nvc0_fence_priv *priv;
 	int ret;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
 
-	priv->base.engine.destroy = nvc0_fence_destroy;
-	priv->base.engine.init = nvc0_fence_init;
-	priv->base.engine.fini = nvc0_fence_fini;
-	priv->base.engine.context_new = nvc0_fence_context_new;
-	priv->base.engine.context_del = nvc0_fence_context_del;
+	priv->base.dtor = nvc0_fence_destroy;
+	priv->base.suspend = nvc0_fence_suspend;
+	priv->base.resume = nvc0_fence_resume;
+	priv->base.context_new = nvc0_fence_context_new;
+	priv->base.context_del = nvc0_fence_context_del;
 	priv->base.emit = nvc0_fence_emit;
 	priv->base.sync = nvc0_fence_sync;
 	priv->base.read = nvc0_fence_read;
-	dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
 
-	ret = nouveau_bo_new(dev, 16 * pfifo->channels, 0, TTM_PL_FLAG_VRAM,
-			     0, 0, NULL, &priv->bo);
+	ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
 	if (ret == 0) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
 		if (ret == 0)
@@ -179,6 +239,6 @@ nvc0_fence_create(struct drm_device *dev)
 	}
 
 	if (ret)
-		nvc0_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+		nvc0_fence_destroy(drm);
 	return ret;
 }
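
The fence code above gives each channel a 16-byte slot in one shared buffer object, sized for every possible channel in nvc0_fence_create(); a minimal sketch of the slot math used by emit/sync/read (the helper name is hypothetical):

/* nouveau_bo_rd32()/wr32() take a 32-bit word index, so a channel's
 * fence value (first word of its 16-byte slot) sits at chid * 16/4.
 */
static u32 fence_slot_word(int chid)
{
	return chid * 16 / 4;
}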
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
deleted file mode 100644
index d03ba8631a69..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ /dev/null
@@ -1,477 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26
27#include "nouveau_drv.h"
28#include "nouveau_mm.h"
29#include "nouveau_fifo.h"
30
31static void nvc0_fifo_isr(struct drm_device *);
32
33struct nvc0_fifo_priv {
34 struct nouveau_fifo_priv base;
35 struct nouveau_gpuobj *playlist[2];
36 int cur_playlist;
37 struct nouveau_vma user_vma;
38 int spoon_nr;
39};
40
41struct nvc0_fifo_chan {
42 struct nouveau_fifo_chan base;
43 struct nouveau_gpuobj *user;
44};
45
46static void
47nvc0_fifo_playlist_update(struct drm_device *dev)
48{
49 struct drm_nouveau_private *dev_priv = dev->dev_private;
50 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
51 struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
52 struct nouveau_gpuobj *cur;
53 int i, p;
54
55 cur = priv->playlist[priv->cur_playlist];
56 priv->cur_playlist = !priv->cur_playlist;
57
58 for (i = 0, p = 0; i < 128; i++) {
59 if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
60 continue;
61 nv_wo32(cur, p + 0, i);
62 nv_wo32(cur, p + 4, 0x00000004);
63 p += 8;
64 }
65 pinstmem->flush(dev);
66
67 nv_wr32(dev, 0x002270, cur->vinst >> 12);
68 nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
69 if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
70 NV_ERROR(dev, "PFIFO - playlist update failed\n");
71}
72
73static int
74nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
75{
76 struct drm_device *dev = chan->dev;
77 struct drm_nouveau_private *dev_priv = dev->dev_private;
78 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
79 struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
80 struct nvc0_fifo_chan *fctx;
81 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
82 int ret, i;
83
84 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
85 if (!fctx)
86 return -ENOMEM;
87
88 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
89 priv->user_vma.offset + (chan->id * 0x1000),
90 PAGE_SIZE);
91 if (!chan->user) {
92 ret = -ENOMEM;
93 goto error;
94 }
95
96 /* allocate vram for control regs, map into polling area */
97 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
98 NVOBJ_FLAG_ZERO_ALLOC, &fctx->user);
99 if (ret)
100 goto error;
101
102 nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
103 *(struct nouveau_mem **)fctx->user->node);
104
105 for (i = 0; i < 0x100; i += 4)
106 nv_wo32(chan->ramin, i, 0x00000000);
107 nv_wo32(chan->ramin, 0x08, lower_32_bits(fctx->user->vinst));
108 nv_wo32(chan->ramin, 0x0c, upper_32_bits(fctx->user->vinst));
109 nv_wo32(chan->ramin, 0x10, 0x0000face);
110 nv_wo32(chan->ramin, 0x30, 0xfffff902);
111 nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
112 nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
113 upper_32_bits(ib_virt));
114 nv_wo32(chan->ramin, 0x54, 0x00000002);
115 nv_wo32(chan->ramin, 0x84, 0x20400000);
116 nv_wo32(chan->ramin, 0x94, 0x30000001);
117 nv_wo32(chan->ramin, 0x9c, 0x00000100);
118 nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
119 nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
120 nv_wo32(chan->ramin, 0xac, 0x0000001f);
121 nv_wo32(chan->ramin, 0xb8, 0xf8000000);
122 nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
123 nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
124 pinstmem->flush(dev);
125
126 nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
127 (chan->ramin->vinst >> 12));
128 nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
129 nvc0_fifo_playlist_update(dev);
130
131error:
132 if (ret)
133 priv->base.base.context_del(chan, engine);
134 return ret;
135}
136
137static void
138nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
139{
140 struct nvc0_fifo_chan *fctx = chan->engctx[engine];
141 struct drm_device *dev = chan->dev;
142
143 nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
144 nv_wr32(dev, 0x002634, chan->id);
145 if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
146 NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
147 nvc0_fifo_playlist_update(dev);
148 nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
149
150 nouveau_gpuobj_ref(NULL, &fctx->user);
151 if (chan->user) {
152 iounmap(chan->user);
153 chan->user = NULL;
154 }
155
156 chan->engctx[engine] = NULL;
157 kfree(fctx);
158}
159
160static int
161nvc0_fifo_init(struct drm_device *dev, int engine)
162{
163 struct drm_nouveau_private *dev_priv = dev->dev_private;
164 struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
165 struct nouveau_channel *chan;
166 int i;
167
168 /* reset PFIFO, enable all available PSUBFIFO areas */
169 nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
170 nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
171 nv_wr32(dev, 0x000204, 0xffffffff);
172 nv_wr32(dev, 0x002204, 0xffffffff);
173
174 priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
175 NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
176
177 /* assign engines to subfifos */
178 if (priv->spoon_nr >= 3) {
179 nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
180 nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
181 nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
182 nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
183 nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
184 nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
185 }
186
187 /* PSUBFIFO[n] */
188 for (i = 0; i < priv->spoon_nr; i++) {
189 nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
190 nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
191 nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
192 }
193
194 nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
195 nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);
196
197 nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
198 nv_wr32(dev, 0x002100, 0xffffffff);
199 nv_wr32(dev, 0x002140, 0xbfffffff);
200
201 /* restore PFIFO context table */
202 for (i = 0; i < 128; i++) {
203 chan = dev_priv->channels.ptr[i];
204 if (!chan || !chan->engctx[engine])
205 continue;
206
207 nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
208 (chan->ramin->vinst >> 12));
209 nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
210 }
211 nvc0_fifo_playlist_update(dev);
212
213 return 0;
214}
215
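priv->spoon_nr above is nothing more than the population count of the PSUBFIFO presence mask read back from 0x002204. A stand-alone illustration of what the kernel's hweight32() computes (mask value hypothetical):

#include <stdint.h>
#include <stdio.h>

/* population count: the operation behind the kernel's hweight32() */
static unsigned int popcount32(uint32_t w)
{
	unsigned int n = 0;

	while (w) {
		w &= w - 1; /* clear the lowest set bit */
		n++;
	}
	return n;
}

int main(void)
{
	uint32_t psubfifo = 0x00000007; /* hypothetical: subfifos 0-2 present */

	printf("PFIFO: %u subfifo(s)\n", popcount32(psubfifo));
	return 0;
}
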
216static int
217nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
218{
219 int i;
220
221 for (i = 0; i < 128; i++) {
222 if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
223 continue;
224
225 nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
226 nv_wr32(dev, 0x002634, i);
227 if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
228 NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
229 i, nv_rd32(dev, 0x002634));
230 return -EBUSY;
231 }
232 }
233
234 nv_wr32(dev, 0x002140, 0x00000000);
235 return 0;
236}
237
238
239struct nouveau_enum nvc0_fifo_fault_unit[] = {
240 { 0x00, "PGRAPH" },
241 { 0x03, "PEEPHOLE" },
242 { 0x04, "BAR1" },
243 { 0x05, "BAR3" },
244 { 0x07, "PFIFO" },
245 { 0x10, "PBSP" },
246 { 0x11, "PPPP" },
247 { 0x13, "PCOUNTER" },
248 { 0x14, "PVP" },
249 { 0x15, "PCOPY0" },
250 { 0x16, "PCOPY1" },
251 { 0x17, "PDAEMON" },
252 {}
253};
254
255struct nouveau_enum nvc0_fifo_fault_reason[] = {
256 { 0x00, "PT_NOT_PRESENT" },
257 { 0x01, "PT_TOO_SHORT" },
258 { 0x02, "PAGE_NOT_PRESENT" },
259 { 0x03, "VM_LIMIT_EXCEEDED" },
260 { 0x04, "NO_CHANNEL" },
261 { 0x05, "PAGE_SYSTEM_ONLY" },
262 { 0x06, "PAGE_READ_ONLY" },
263 { 0x0a, "COMPRESSED_SYSRAM" },
264 { 0x0c, "INVALID_STORAGE_TYPE" },
265 {}
266};
267
268struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
269 { 0x01, "PCOPY0" },
270 { 0x02, "PCOPY1" },
271 { 0x04, "DISPATCH" },
272 { 0x05, "CTXCTL" },
273 { 0x06, "PFIFO" },
274 { 0x07, "BAR_READ" },
275 { 0x08, "BAR_WRITE" },
276 { 0x0b, "PVP" },
277 { 0x0c, "PPPP" },
278 { 0x0d, "PBSP" },
279 { 0x11, "PCOUNTER" },
280 { 0x12, "PDAEMON" },
281 { 0x14, "CCACHE" },
282 { 0x15, "CCACHE_POST" },
283 {}
284};
285
286struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
287 { 0x01, "TEX" },
288 { 0x0c, "ESETUP" },
289 { 0x0e, "CTXCTL" },
290 { 0x0f, "PROP" },
291 {}
292};
293
294struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
295/* { 0x00008000, "" } seen with null ib push */
296 { 0x00200000, "ILLEGAL_MTHD" },
297 { 0x00800000, "EMPTY_SUBC" },
298 {}
299};
300
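All four tables above share one convention: value/name pairs terminated by an empty sentinel entry, which is what lets the nouveau_enum_print()/nouveau_bitfield_print() calls further down scan them without a length argument. A minimal userspace sketch of that lookup (local struct for illustration; the in-tree definitions may carry extra members):

#include <stdint.h>
#include <stdio.h>

struct enum_entry {
	uint32_t value;
	const char *name;
};

static const struct enum_entry fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{} /* sentinel: name == NULL ends the scan */
};

static void enum_print(const struct enum_entry *en, uint32_t value)
{
	for (; en->name; en++) {
		if (en->value == value) {
			printf("%s", en->name);
			return;
		}
	}
	printf("(unknown 0x%08x)", value); /* unlisted code */
}

int main(void)
{
	enum_print(fault_reason, 0x02);
	printf("\n");
	return 0;
}

A bitfield table is walked the same way, except every entry whose mask intersects the status word is printed rather than the first exact match.
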
301static void
302nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
303{
304 u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
305 u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
306 u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
307 u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
308 u32 client = (stat & 0x00001f00) >> 8;
309
310 NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
311 (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
312 nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
313 printk("] from ");
314 nouveau_enum_print(nvc0_fifo_fault_unit, unit);
315 if (stat & 0x00000040) {
316 printk("/");
317 nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
318 } else {
319 printk("/GPC%d/", (stat & 0x1f000000) >> 24);
320 nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
321 }
322 printk(" on channel 0x%010llx\n", (u64)inst << 12);
323}
324
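The handler above implies the layout of the per-unit fault status word at 0x280c: the low nibble is the fault reason, bit 6 selects hub vs. GPC client, bit 7 read vs. write, bits 8-12 the client id and bits 24-28 the GPC id. A decoder sketch over a hypothetical readback (layout inferred from the code, not from documentation):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t stat = 0x00000082; /* hypothetical: write fault, PAGE_NOT_PRESENT */

	printf("%s fault, reason 0x%x, %s client 0x%02x",
	       (stat & 0x00000080) ? "write" : "read",
	       stat & 0x0000000f,
	       (stat & 0x00000040) ? "hub" : "gpc",
	       (stat & 0x00001f00) >> 8);
	if (!(stat & 0x00000040))
		printf(" on GPC%u", (stat & 0x1f000000) >> 24);
	printf("\n");
	return 0;
}
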
325static int
326nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
327{
328 struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
329 struct drm_nouveau_private *dev_priv = dev->dev_private;
330 struct nouveau_channel *chan = NULL;
331 unsigned long flags;
332 int ret = -EINVAL;
333
334 spin_lock_irqsave(&dev_priv->channels.lock, flags);
335 if (likely(chid < priv->base.channels)) { /* chid is u32; >= 0 always holds */
336 chan = dev_priv->channels.ptr[chid];
337 if (likely(chan))
338 ret = nouveau_finish_page_flip(chan, NULL);
339 }
340 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
341 return ret;
342}
343
344static void
345nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
346{
347 u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
348 u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
349 u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
350 u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
351 u32 subc = (addr & 0x00070000) >> 16;
352 u32 mthd = (addr & 0x00003ffc);
353 u32 show = stat;
354
355 if (stat & 0x00200000) {
356 if (mthd == 0x0054) {
357 if (!nvc0_fifo_page_flip(dev, chid))
358 show &= ~0x00200000;
359 }
360 }
361
362 if (show) {
363 NV_INFO(dev, "PFIFO%d:", unit);
364 nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
365 NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
366 unit, chid, subc, mthd, data);
367 }
368
369 nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
370 nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
371}
372
373static void
374nvc0_fifo_isr(struct drm_device *dev)
375{
376 u32 mask = nv_rd32(dev, 0x002140);
377 u32 stat = nv_rd32(dev, 0x002100) & mask;
378
379 if (stat & 0x00000100) {
380 NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
381 nv_wr32(dev, 0x002100, 0x00000100);
382 stat &= ~0x00000100;
383 }
384
385 if (stat & 0x10000000) {
386 u32 units = nv_rd32(dev, 0x00259c);
387 u32 u = units;
388
389 while (u) {
390 int i = ffs(u) - 1;
391 nvc0_fifo_isr_vm_fault(dev, i);
392 u &= ~(1 << i);
393 }
394
395 nv_wr32(dev, 0x00259c, units);
396 stat &= ~0x10000000;
397 }
398
399 if (stat & 0x20000000) {
400 u32 units = nv_rd32(dev, 0x0025a0);
401 u32 u = units;
402
403 while (u) {
404 int i = ffs(u) - 1;
405 nvc0_fifo_isr_subfifo_intr(dev, i);
406 u &= ~(1 << i);
407 }
408
409 nv_wr32(dev, 0x0025a0, units);
410 stat &= ~0x20000000;
411 }
412
413 if (stat & 0x40000000) {
414 NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
415 nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
416 stat &= ~0x40000000;
417 }
418
419 if (stat) {
420 NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
421 nv_wr32(dev, 0x002100, stat);
422 nv_wr32(dev, 0x002140, 0);
423 }
424}
425
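Both the VM-fault and subfifo branches above drain their pending-unit masks with the same idiom: ffs() finds the lowest set bit, that unit is serviced, and the bit is cleared until nothing is left. The idiom in isolation (POSIX ffs(); the mask is hypothetical):

#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void)
{
	unsigned int u = 0x00000015; /* hypothetical: units 0, 2 and 4 pending */

	while (u) {
		int i = ffs(u) - 1; /* lowest pending unit */

		printf("service unit %d\n", i);
		u &= ~(1u << i);
	}
	return 0;
}
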
426static void
427nvc0_fifo_destroy(struct drm_device *dev, int engine)
428{
429 struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
430 struct drm_nouveau_private *dev_priv = dev->dev_private;
431
432 nouveau_vm_put(&priv->user_vma);
433 nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
434 nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
435
436 dev_priv->eng[engine] = NULL;
437 kfree(priv);
438}
439
440int
441nvc0_fifo_create(struct drm_device *dev)
442{
443 struct drm_nouveau_private *dev_priv = dev->dev_private;
444 struct nvc0_fifo_priv *priv;
445 int ret;
446
447 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
448 if (!priv)
449 return -ENOMEM;
450
451 priv->base.base.destroy = nvc0_fifo_destroy;
452 priv->base.base.init = nvc0_fifo_init;
453 priv->base.base.fini = nvc0_fifo_fini;
454 priv->base.base.context_new = nvc0_fifo_context_new;
455 priv->base.base.context_del = nvc0_fifo_context_del;
456 priv->base.channels = 128;
457 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
458
459 ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
460 if (ret)
461 goto error;
462
463 ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
464 if (ret)
465 goto error;
466
467 ret = nouveau_vm_get(dev_priv->bar1_vm, priv->base.channels * 0x1000,
468 12, NV_MEM_ACCESS_RW, &priv->user_vma);
469 if (ret)
470 goto error;
471
472 nouveau_irq_register(dev, 8, nvc0_fifo_isr);
473error:
474 if (ret)
475 priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
476 return ret;
477}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
deleted file mode 100644
index 59670acad7b9..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ /dev/null
@@ -1,897 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26#include <linux/module.h>
27
28#include <drm/drmP.h>
29
30#include "nouveau_drv.h"
31#include "nouveau_mm.h"
32#include "nouveau_fifo.h"
33
34#include "nvc0_graph.h"
35#include "nvc0_grhub.fuc.h"
36#include "nvc0_grgpc.fuc.h"
37
38static void
39nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
40{
41 NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
42 nv_rd32(dev, base + 0x400));
43 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
44 nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
45 nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
46 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
47 nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
48 nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
49}
50
51static void
52nvc0_graph_ctxctl_debug(struct drm_device *dev)
53{
54 u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
55 u32 gpc;
56
57 nvc0_graph_ctxctl_debug_unit(dev, 0x409000);
58 for (gpc = 0; gpc < gpcnr; gpc++)
59 nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
60}
61
62static int
63nvc0_graph_load_context(struct nouveau_channel *chan)
64{
65 struct drm_device *dev = chan->dev;
66
67 nv_wr32(dev, 0x409840, 0x00000030);
68 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
69 nv_wr32(dev, 0x409504, 0x00000003);
70 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
71 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
72
73 return 0;
74}
75
76static int
77nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
78{
79 nv_wr32(dev, 0x409840, 0x00000003);
80 nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
81 nv_wr32(dev, 0x409504, 0x00000009);
82 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
83 NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
84 return -EBUSY;
85 }
86
87 return 0;
88}
89
90static int
91nvc0_graph_construct_context(struct nouveau_channel *chan)
92{
93 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
94 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
95 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
96 struct drm_device *dev = chan->dev;
97 int ret, i;
98 u32 *ctx;
99
100 ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
101 if (!ctx)
102 return -ENOMEM;
103
104 if (!nouveau_ctxfw) {
105 nv_wr32(dev, 0x409840, 0x80000000);
106 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
107 nv_wr32(dev, 0x409504, 0x00000001);
108 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
109 NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
110 nvc0_graph_ctxctl_debug(dev);
111 ret = -EBUSY;
112 goto err;
113 }
114 } else {
115 nvc0_graph_load_context(chan);
116
117 nv_wo32(grch->grctx, 0x1c, 1);
118 nv_wo32(grch->grctx, 0x20, 0);
119 nv_wo32(grch->grctx, 0x28, 0);
120 nv_wo32(grch->grctx, 0x2c, 0);
121 dev_priv->engine.instmem.flush(dev);
122 }
123
124 ret = nvc0_grctx_generate(chan);
125 if (ret)
126 goto err;
127
128 if (!nouveau_ctxfw) {
129 nv_wr32(dev, 0x409840, 0x80000000);
130 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
131 nv_wr32(dev, 0x409504, 0x00000002);
132 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
133 NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
134 nvc0_graph_ctxctl_debug(dev);
135 ret = -EBUSY;
136 goto err;
137 }
138 } else {
139 ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
140 if (ret)
141 goto err;
142 }
143
144 for (i = 0; i < priv->grctx_size; i += 4)
145 ctx[i / 4] = nv_ro32(grch->grctx, i);
146
147 priv->grctx_vals = ctx;
148 return 0;
149
150err:
151 kfree(ctx);
152 return ret;
153}
154
155static int
156nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
157{
158 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
159 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
160 struct drm_device *dev = chan->dev;
161 struct drm_nouveau_private *dev_priv = dev->dev_private;
162 int i = 0, gpc, tp, ret;
163
164 ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
165 &grch->unk408004);
166 if (ret)
167 return ret;
168
169 ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
170 &grch->unk40800c);
171 if (ret)
172 return ret;
173
174 ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
175 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
176 &grch->unk418810);
177 if (ret)
178 return ret;
179
180 ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
181 &grch->mmio);
182 if (ret)
183 return ret;
184
185
186 nv_wo32(grch->mmio, i++ * 4, 0x00408004);
187 nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
188 nv_wo32(grch->mmio, i++ * 4, 0x00408008);
189 nv_wo32(grch->mmio, i++ * 4, 0x80000018);
190
191 nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
192 nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
193 nv_wo32(grch->mmio, i++ * 4, 0x00408010);
194 nv_wo32(grch->mmio, i++ * 4, 0x80000000);
195
196 nv_wo32(grch->mmio, i++ * 4, 0x00418810);
197 nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12);
198 nv_wo32(grch->mmio, i++ * 4, 0x00419848);
199 nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12);
200
201 nv_wo32(grch->mmio, i++ * 4, 0x00419004);
202 nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
203 nv_wo32(grch->mmio, i++ * 4, 0x00419008);
204 nv_wo32(grch->mmio, i++ * 4, 0x00000000);
205
206 nv_wo32(grch->mmio, i++ * 4, 0x00418808);
207 nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
208 nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
209 nv_wo32(grch->mmio, i++ * 4, 0x80000018);
210
211 if (dev_priv->chipset != 0xc1) {
212 u32 magic = 0x02180000;
213 nv_wo32(grch->mmio, i++ * 4, 0x00405830);
214 nv_wo32(grch->mmio, i++ * 4, magic);
215 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
216 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
217 u32 reg = TP_UNIT(gpc, tp, 0x520);
218 nv_wo32(grch->mmio, i++ * 4, reg);
219 nv_wo32(grch->mmio, i++ * 4, magic);
220 magic += 0x0324;
221 }
222 }
223 } else {
224 u32 magic = 0x02180000;
225 nv_wo32(grch->mmio, i++ * 4, 0x00405830);
226 nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218);
227 nv_wo32(grch->mmio, i++ * 4, 0x004064c4);
228 nv_wo32(grch->mmio, i++ * 4, 0x0086ffff);
229 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
230 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
231 u32 reg = TP_UNIT(gpc, tp, 0x520);
232 nv_wo32(grch->mmio, i++ * 4, reg);
233 nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic);
234 magic += 0x0324;
235 }
236 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
237 u32 reg = TP_UNIT(gpc, tp, 0x544);
238 nv_wo32(grch->mmio, i++ * 4, reg);
239 nv_wo32(grch->mmio, i++ * 4, magic);
240 magic += 0x0324;
241 }
242 }
243 }
244
245 grch->mmio_nr = i / 2;
246 return 0;
247}
248
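The grch->mmio object built above is simply a flat array of register/value dword pairs that the context microcode replays later, which is why mmio_nr ends up as i / 2. A sketch of the same construction in plain memory (offsets and values hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mmio[64]; /* stand-in for the grch->mmio gpuobj */
	int i = 0;

	/* each entry is two consecutive dwords: register, then value */
	mmio[i++] = 0x00408004; mmio[i++] = 0x00001234;
	mmio[i++] = 0x00408008; mmio[i++] = 0x80000018;

	printf("mmio_nr = %d pair(s), last value 0x%08x\n", i / 2, mmio[i - 1]);
	return 0;
}
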
249static int
250nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
251{
252 struct drm_device *dev = chan->dev;
253 struct drm_nouveau_private *dev_priv = dev->dev_private;
254 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
255 struct nvc0_graph_priv *priv = nv_engine(dev, engine);
256 struct nvc0_graph_chan *grch;
257 struct nouveau_gpuobj *grctx;
258 int ret, i;
259
260 grch = kzalloc(sizeof(*grch), GFP_KERNEL);
261 if (!grch)
262 return -ENOMEM;
263 chan->engctx[NVOBJ_ENGINE_GR] = grch;
264
265 ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
266 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
267 &grch->grctx);
268 if (ret)
269 goto error;
270 grctx = grch->grctx;
271
272 ret = nvc0_graph_create_context_mmio_list(chan);
273 if (ret)
274 goto error;
275
276 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
277 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
278 pinstmem->flush(dev);
279
280 if (!priv->grctx_vals) {
281 ret = nvc0_graph_construct_context(chan);
282 if (ret)
283 goto error;
284 }
285
286 for (i = 0; i < priv->grctx_size; i += 4)
287 nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
288
289 if (!nouveau_ctxfw) {
290 nv_wo32(grctx, 0x00, grch->mmio_nr);
291 nv_wo32(grctx, 0x04, grch->mmio->linst >> 8);
292 } else {
293 nv_wo32(grctx, 0xf4, 0);
294 nv_wo32(grctx, 0xf8, 0);
295 nv_wo32(grctx, 0x10, grch->mmio_nr);
296 nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
297 nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
298 nv_wo32(grctx, 0x1c, 1);
299 nv_wo32(grctx, 0x20, 0);
300 nv_wo32(grctx, 0x28, 0);
301 nv_wo32(grctx, 0x2c, 0);
302 }
303 pinstmem->flush(dev);
304 return 0;
305
306error:
307 priv->base.context_del(chan, engine);
308 return ret;
309}
310
311static void
312nvc0_graph_context_del(struct nouveau_channel *chan, int engine)
313{
314 struct nvc0_graph_chan *grch = chan->engctx[engine];
315
316 nouveau_gpuobj_ref(NULL, &grch->mmio);
317 nouveau_gpuobj_ref(NULL, &grch->unk418810);
318 nouveau_gpuobj_ref(NULL, &grch->unk40800c);
319 nouveau_gpuobj_ref(NULL, &grch->unk408004);
320 nouveau_gpuobj_ref(NULL, &grch->grctx);
321 chan->engctx[engine] = NULL;
322}
323
324static int
325nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
326 u32 handle, u16 class)
327{
328 return 0;
329}
330
331static int
332nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
333{
334 return 0;
335}
336
337static void
338nvc0_graph_init_obj418880(struct drm_device *dev)
339{
340 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
341 int i;
342
343 nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
344 nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
345 for (i = 0; i < 4; i++)
346 nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
347 nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
348 nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
349}
350
351static void
352nvc0_graph_init_regs(struct drm_device *dev)
353{
354 nv_wr32(dev, 0x400080, 0x003083c2);
355 nv_wr32(dev, 0x400088, 0x00006fe7);
356 nv_wr32(dev, 0x40008c, 0x00000000);
357 nv_wr32(dev, 0x400090, 0x00000030);
358 nv_wr32(dev, 0x40013c, 0x013901f7);
359 nv_wr32(dev, 0x400140, 0x00000100);
360 nv_wr32(dev, 0x400144, 0x00000000);
361 nv_wr32(dev, 0x400148, 0x00000110);
362 nv_wr32(dev, 0x400138, 0x00000000);
363 nv_wr32(dev, 0x400130, 0x00000000);
364 nv_wr32(dev, 0x400134, 0x00000000);
365 nv_wr32(dev, 0x400124, 0x00000002);
366}
367
368static void
369nvc0_graph_init_gpc_0(struct drm_device *dev)
370{
371 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
372 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total);
373 u32 data[TP_MAX / 8];
374 u8 tpnr[GPC_MAX];
375 int i, gpc, tpc;
376
377 nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
378
379 /*
380 * TP ROP UNKVAL(magic_not_rop_nr)
381 * 450: 4/0/0/0 2 3
382 * 460: 3/4/0/0 4 1
383 * 465: 3/4/4/0 4 7
384 * 470: 3/3/4/4 5 5
385 * 480: 3/4/4/4 6 6
386 */
387
388 memset(data, 0x00, sizeof(data));
389 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
390 for (i = 0, gpc = -1; i < priv->tp_total; i++) {
391 do {
392 gpc = (gpc + 1) % priv->gpc_nr;
393 } while (!tpnr[gpc]);
394 tpc = priv->tp_nr[gpc] - tpnr[gpc]--;
395
396 data[i / 8] |= tpc << ((i % 8) * 4);
397 }
398
399 nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
400 nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
401 nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
402 nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
403
404 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
405 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
406 priv->tp_nr[gpc]);
407 nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
408 nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
409 }
410
411 nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
412 nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
413}
414
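The distribution loop above round-robins TPs across GPCs and packs the resulting per-TP index into 4-bit nibbles, eight to a word. Extracted into a runnable sketch with a hypothetical 3/3/4/4 configuration (the "470" row of the comment table):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GPC_NR 4
#define TP_TOTAL 14

int main(void)
{
	uint8_t tp_nr[GPC_NR] = { 3, 3, 4, 4 }, tpnr[GPC_NR];
	uint32_t data[4];
	int i, gpc = -1, tpc;

	memset(data, 0, sizeof(data));
	memcpy(tpnr, tp_nr, sizeof(tpnr));
	for (i = 0; i < TP_TOTAL; i++) {
		do { /* next GPC that still has TPs to hand out */
			gpc = (gpc + 1) % GPC_NR;
		} while (!tpnr[gpc]);
		tpc = tp_nr[gpc] - tpnr[gpc]--; /* TP index within that GPC */

		data[i / 8] |= tpc << ((i % 8) * 4); /* 4 bits each, 8 per word */
	}
	printf("0x0980 = 0x%08x, 0x0984 = 0x%08x\n", data[0], data[1]);
	return 0;
}
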
415static void
416nvc0_graph_init_units(struct drm_device *dev)
417{
418 nv_wr32(dev, 0x409c24, 0x000f0000);
419 nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
420 nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
421 nv_wr32(dev, 0x408030, 0xc0000000);
422 nv_wr32(dev, 0x40601c, 0xc0000000);
423 nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
424 nv_wr32(dev, 0x406018, 0xc0000000);
425 nv_wr32(dev, 0x405840, 0xc0000000);
426 nv_wr32(dev, 0x405844, 0x00ffffff);
427 nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
428 nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
429}
430
431static void
432nvc0_graph_init_gpc_1(struct drm_device *dev)
433{
434 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
435 int gpc, tp;
436
437 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
438 nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
439 nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
440 nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
441 nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
442 for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
443 nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
444 nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
445 nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
446 nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
447 nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
448 nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe);
449 nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f);
450 }
451 nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
452 nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
453 }
454}
455
456static void
457nvc0_graph_init_rop(struct drm_device *dev)
458{
459 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
460 int rop;
461
462 for (rop = 0; rop < priv->rop_nr; rop++) {
463 nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
464 nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
465 nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
466 nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
467 }
468}
469
470static void
471nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
472 struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
473{
474 int i;
475
476 nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
477 for (i = 0; i < data->size / 4; i++)
478 nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
479
480 nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
481 for (i = 0; i < code->size / 4; i++) {
482 if ((i & 0x3f) == 0)
483 nv_wr32(dev, fuc_base + 0x0188, i >> 6);
484 nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
485 }
486}
487
488static int
489nvc0_graph_init_ctxctl(struct drm_device *dev)
490{
491 struct drm_nouveau_private *dev_priv = dev->dev_private;
492 struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
493 u32 r000260;
494 int i;
495
496 if (!nouveau_ctxfw) {
497 /* load HUB microcode */
498 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
499 nv_wr32(dev, 0x4091c0, 0x01000000);
500 for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
501 nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);
502
503 nv_wr32(dev, 0x409180, 0x01000000);
504 for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
505 if ((i & 0x3f) == 0)
506 nv_wr32(dev, 0x409188, i >> 6);
507 nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
508 }
509
510 /* load GPC microcode */
511 nv_wr32(dev, 0x41a1c0, 0x01000000);
512 for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
513 nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]);
514
515 nv_wr32(dev, 0x41a180, 0x01000000);
516 for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
517 if ((i & 0x3f) == 0)
518 nv_wr32(dev, 0x41a188, i >> 6);
519 nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]);
520 }
521 nv_wr32(dev, 0x000260, r000260);
522
523 /* start HUB ucode running, it'll init the GPCs */
524 nv_wr32(dev, 0x409800, dev_priv->chipset);
525 nv_wr32(dev, 0x40910c, 0x00000000);
526 nv_wr32(dev, 0x409100, 0x00000002);
527 if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
528 NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
529 nvc0_graph_ctxctl_debug(dev);
530 return -EBUSY;
531 }
532
533 priv->grctx_size = nv_rd32(dev, 0x409804);
534 return 0;
535 }
536
537 /* load fuc microcode */
538 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
539 nvc0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
540 nvc0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
541 nv_wr32(dev, 0x000260, r000260);
542
543 /* start both of them running */
544 nv_wr32(dev, 0x409840, 0xffffffff);
545 nv_wr32(dev, 0x41a10c, 0x00000000);
546 nv_wr32(dev, 0x40910c, 0x00000000);
547 nv_wr32(dev, 0x41a100, 0x00000002);
548 nv_wr32(dev, 0x409100, 0x00000002);
549 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
550 NV_INFO(dev, "0x409800 wait failed\n");
551
552 nv_wr32(dev, 0x409840, 0xffffffff);
553 nv_wr32(dev, 0x409500, 0x7fffffff);
554 nv_wr32(dev, 0x409504, 0x00000021);
555
556 nv_wr32(dev, 0x409840, 0xffffffff);
557 nv_wr32(dev, 0x409500, 0x00000000);
558 nv_wr32(dev, 0x409504, 0x00000010);
559 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
560 NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
561 return -EBUSY;
562 }
563 priv->grctx_size = nv_rd32(dev, 0x409800);
564
565 nv_wr32(dev, 0x409840, 0xffffffff);
566 nv_wr32(dev, 0x409500, 0x00000000);
567 nv_wr32(dev, 0x409504, 0x00000016);
568 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
569 NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
570 return -EBUSY;
571 }
572
573 nv_wr32(dev, 0x409840, 0xffffffff);
574 nv_wr32(dev, 0x409500, 0x00000000);
575 nv_wr32(dev, 0x409504, 0x00000025);
576 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
577 NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
578 return -EBUSY;
579 }
580
581 return 0;
582}
583
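Every fuc09 request above follows one handshake: arm the ack mask at 0x409840, stage the argument in 0x409500, post the request code to 0x409504, then poll 0x409800 until it leaves zero; that last step is all nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000) amounts to, plus a timeout. A self-contained sketch of that shape, with a fake register read standing in for MMIO:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t reg_read(void)
{
	static int polls; /* pretend the ucode replies on the third poll */

	return ++polls >= 3 ? 0x00000123 : 0x00000000;
}

/* poll until (read & mask) != data, or the retry budget runs out */
static bool wait_ne(uint32_t mask, uint32_t data, int retries)
{
	while (retries--) {
		if ((reg_read() & mask) != data)
			return true;
	}
	return false;
}

int main(void)
{
	if (!wait_ne(0xffffffff, 0x00000000, 1000)) {
		fprintf(stderr, "fuc09 request timeout\n");
		return 1;
	}
	printf("request acknowledged\n");
	return 0;
}
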
584static int
585nvc0_graph_init(struct drm_device *dev, int engine)
586{
587 int ret;
588
589 nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
590 nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
591
592 nvc0_graph_init_obj418880(dev);
593 nvc0_graph_init_regs(dev);
594 /*nvc0_graph_init_unimplemented_magics(dev);*/
595 nvc0_graph_init_gpc_0(dev);
596 /*nvc0_graph_init_unimplemented_c242(dev);*/
597
598 nv_wr32(dev, 0x400500, 0x00010001);
599 nv_wr32(dev, 0x400100, 0xffffffff);
600 nv_wr32(dev, 0x40013c, 0xffffffff);
601
602 nvc0_graph_init_units(dev);
603 nvc0_graph_init_gpc_1(dev);
604 nvc0_graph_init_rop(dev);
605
606 nv_wr32(dev, 0x400108, 0xffffffff);
607 nv_wr32(dev, 0x400138, 0xffffffff);
608 nv_wr32(dev, 0x400118, 0xffffffff);
609 nv_wr32(dev, 0x400130, 0xffffffff);
610 nv_wr32(dev, 0x40011c, 0xffffffff);
611 nv_wr32(dev, 0x400134, 0xffffffff);
612 nv_wr32(dev, 0x400054, 0x34ce3464);
613
614 ret = nvc0_graph_init_ctxctl(dev);
615 if (ret)
616 return ret;
617
618 return 0;
619}
620
621int
622nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
623{
624 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
625 struct drm_nouveau_private *dev_priv = dev->dev_private;
626 struct nouveau_channel *chan;
627 unsigned long flags;
628 int i;
629
630 spin_lock_irqsave(&dev_priv->channels.lock, flags);
631 for (i = 0; i < pfifo->channels; i++) {
632 chan = dev_priv->channels.ptr[i];
633 if (!chan || !chan->ramin)
634 continue;
635
636 if (inst == chan->ramin->vinst)
637 break;
638 }
639 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
640 return i;
641}
642
643static void
644nvc0_graph_ctxctl_isr(struct drm_device *dev)
645{
646 u32 ustat = nv_rd32(dev, 0x409c18);
647
648 if (ustat & 0x00000001)
649 NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
650 if (ustat & 0x00080000)
651 NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
652 if (ustat & ~0x00080001)
653 NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
654
655 nvc0_graph_ctxctl_debug(dev);
656 nv_wr32(dev, 0x409c20, ustat);
657}
658
659static void
660nvc0_graph_isr(struct drm_device *dev)
661{
662 u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
663 u32 chid = nvc0_graph_isr_chid(dev, inst);
664 u32 stat = nv_rd32(dev, 0x400100);
665 u32 addr = nv_rd32(dev, 0x400704);
666 u32 mthd = (addr & 0x00003ffc);
667 u32 subc = (addr & 0x00070000) >> 16;
668 u32 data = nv_rd32(dev, 0x400708);
669 u32 code = nv_rd32(dev, 0x400110);
670 u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
671
672 if (stat & 0x00000010) {
673 if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
674 NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
675 "subc %d class 0x%04x mthd 0x%04x "
676 "data 0x%08x\n",
677 chid, inst, subc, class, mthd, data);
678 }
679 nv_wr32(dev, 0x400100, 0x00000010);
680 stat &= ~0x00000010;
681 }
682
683 if (stat & 0x00000020) {
684 NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
685 "class 0x%04x mthd 0x%04x data 0x%08x\n",
686 chid, inst, subc, class, mthd, data);
687 nv_wr32(dev, 0x400100, 0x00000020);
688 stat &= ~0x00000020;
689 }
690
691 if (stat & 0x00100000) {
692 NV_INFO(dev, "PGRAPH: DATA_ERROR [");
693 nouveau_enum_print(nv50_data_error_names, code);
694 printk("] ch %d [0x%010llx] subc %d class 0x%04x "
695 "mthd 0x%04x data 0x%08x\n",
696 chid, inst, subc, class, mthd, data);
697 nv_wr32(dev, 0x400100, 0x00100000);
698 stat &= ~0x00100000;
699 }
700
701 if (stat & 0x00200000) {
702 u32 trap = nv_rd32(dev, 0x400108);
703 NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
704 nv_wr32(dev, 0x400108, trap);
705 nv_wr32(dev, 0x400100, 0x00200000);
706 stat &= ~0x00200000;
707 }
708
709 if (stat & 0x00080000) {
710 nvc0_graph_ctxctl_isr(dev);
711 nv_wr32(dev, 0x400100, 0x00080000);
712 stat &= ~0x00080000;
713 }
714
715 if (stat) {
716 NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
717 nv_wr32(dev, 0x400100, stat);
718 }
719
720 nv_wr32(dev, 0x400500, 0x00010001);
721}
722
723static int
724nvc0_graph_create_fw(struct drm_device *dev, const char *fwname,
725 struct nvc0_graph_fuc *fuc)
726{
727 struct drm_nouveau_private *dev_priv = dev->dev_private;
728 const struct firmware *fw;
729 char f[32];
730 int ret;
731
732 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
733 ret = request_firmware(&fw, f, &dev->pdev->dev);
734 if (ret) {
735 snprintf(f, sizeof(f), "nouveau/%s", fwname);
736 ret = request_firmware(&fw, f, &dev->pdev->dev);
737 if (ret) {
738 NV_ERROR(dev, "failed to load %s\n", fwname);
739 return ret;
740 }
741 }
742
743 fuc->size = fw->size;
744 fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
745 release_firmware(fw);
746 return (fuc->data != NULL) ? 0 : -ENOMEM;
747}
748
749static void
750nvc0_graph_destroy_fw(struct nvc0_graph_fuc *fuc)
751{
752 if (fuc->data) {
753 kfree(fuc->data);
754 fuc->data = NULL;
755 }
756}
757
758static void
759nvc0_graph_destroy(struct drm_device *dev, int engine)
760{
761 struct nvc0_graph_priv *priv = nv_engine(dev, engine);
762
763 if (nouveau_ctxfw) {
764 nvc0_graph_destroy_fw(&priv->fuc409c);
765 nvc0_graph_destroy_fw(&priv->fuc409d);
766 nvc0_graph_destroy_fw(&priv->fuc41ac);
767 nvc0_graph_destroy_fw(&priv->fuc41ad);
768 }
769
770 nouveau_irq_unregister(dev, 12);
771
772 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
773 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
774
775 if (priv->grctx_vals)
776 kfree(priv->grctx_vals);
777
778 NVOBJ_ENGINE_DEL(dev, GR);
779 kfree(priv);
780}
781
782int
783nvc0_graph_create(struct drm_device *dev)
784{
785 struct drm_nouveau_private *dev_priv = dev->dev_private;
786 struct nvc0_graph_priv *priv;
787 int ret, gpc, i;
788 u32 fermi;
789
790 fermi = nvc0_graph_class(dev);
791 if (!fermi) {
792 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
793 return 0;
794 }
795
796 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
797 if (!priv)
798 return -ENOMEM;
799
800 priv->base.destroy = nvc0_graph_destroy;
801 priv->base.init = nvc0_graph_init;
802 priv->base.fini = nvc0_graph_fini;
803 priv->base.context_new = nvc0_graph_context_new;
804 priv->base.context_del = nvc0_graph_context_del;
805 priv->base.object_new = nvc0_graph_object_new;
806
807 NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
808 nouveau_irq_register(dev, 12, nvc0_graph_isr);
809
810 if (nouveau_ctxfw) {
811 NV_INFO(dev, "PGRAPH: using external firmware\n");
812 if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
813 nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
814 nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
815 nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
816 ret = 0;
817 goto error;
818 }
819 }
820
821 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
822 if (ret)
823 goto error;
824
825 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
826 if (ret)
827 goto error;
828
829 for (i = 0; i < 0x1000; i += 4) {
830 nv_wo32(priv->unk4188b4, i, 0x00000010);
831 nv_wo32(priv->unk4188b8, i, 0x00000010);
832 }
833
834 priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
835 priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
836 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
837 priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
838 priv->tp_total += priv->tp_nr[gpc];
839 }
840
841 /*XXX: these need figuring out... */
842 switch (dev_priv->chipset) {
843 case 0xc0:
844 if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
845 priv->magic_not_rop_nr = 0x07;
846 } else
847 if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
848 priv->magic_not_rop_nr = 0x05;
849 } else
850 if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
851 priv->magic_not_rop_nr = 0x06;
852 }
853 break;
854 case 0xc3: /* 450, 4/0/0/0, 2 */
855 priv->magic_not_rop_nr = 0x03;
856 break;
857 case 0xc4: /* 460, 3/4/0/0, 4 */
858 priv->magic_not_rop_nr = 0x01;
859 break;
860 case 0xc1: /* 2/0/0/0, 1 */
861 priv->magic_not_rop_nr = 0x01;
862 break;
863 case 0xc8: /* 4/4/3/4, 5 */
864 priv->magic_not_rop_nr = 0x06;
865 break;
866 case 0xce: /* 4/4/0/0, 4 */
867 priv->magic_not_rop_nr = 0x03;
868 break;
869 case 0xcf: /* 4/0/0/0, 3 */
870 priv->magic_not_rop_nr = 0x03;
871 break;
872 case 0xd9: /* 1/0/0/0, 1 */
873 priv->magic_not_rop_nr = 0x01;
874 break;
875 }
876
877 if (!priv->magic_not_rop_nr) {
878 NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
879 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
880 priv->tp_nr[3], priv->rop_nr);
881 priv->magic_not_rop_nr = 0x00;
882 }
883
884 NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
885 NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
886 NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
887 if (fermi >= 0x9197)
888 NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
889 if (fermi >= 0x9297)
890 NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
891 NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
892 return 0;
893
894error:
895 nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
896 return ret;
897}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
deleted file mode 100644
index 91d44ea662d9..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ /dev/null
@@ -1,97 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NVC0_GRAPH_H__
26#define __NVC0_GRAPH_H__
27
28#define GPC_MAX 4
29#define TP_MAX 32
30
31#define ROP_BCAST(r) (0x408800 + (r))
32#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
33#define GPC_BCAST(r) (0x418000 + (r))
34#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
35#define TP_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
36
37struct nvc0_graph_fuc {
38 u32 *data;
39 u32 size;
40};
41
42struct nvc0_graph_priv {
43 struct nouveau_exec_engine base;
44
45 struct nvc0_graph_fuc fuc409c;
46 struct nvc0_graph_fuc fuc409d;
47 struct nvc0_graph_fuc fuc41ac;
48 struct nvc0_graph_fuc fuc41ad;
49
50 u8 gpc_nr;
51 u8 rop_nr;
52 u8 tp_nr[GPC_MAX];
53 u8 tp_total;
54
55 u32 grctx_size;
56 u32 *grctx_vals;
57 struct nouveau_gpuobj *unk4188b4;
58 struct nouveau_gpuobj *unk4188b8;
59
60 u8 magic_not_rop_nr;
61};
62
63struct nvc0_graph_chan {
64 struct nouveau_gpuobj *grctx;
65 struct nouveau_gpuobj *unk408004; /* 0x418810 too */
66 struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
67 struct nouveau_gpuobj *unk418810; /* 0x419848 too */
68 struct nouveau_gpuobj *mmio;
69 int mmio_nr;
70};
71
72int nvc0_grctx_generate(struct nouveau_channel *);
73
74/* nvc0_graph.c uses this also to determine supported chipsets */
75static inline u32
76nvc0_graph_class(struct drm_device *dev)
77{
78 struct drm_nouveau_private *dev_priv = dev->dev_private;
79
80 switch (dev_priv->chipset) {
81 case 0xc0:
82 case 0xc3:
83 case 0xc4:
84 case 0xce: /* guess, mmio trace shows only 0x9097 state */
85 case 0xcf: /* guess, mmio trace shows only 0x9097 state */
86 return 0x9097;
87 case 0xc1:
88 return 0x9197;
89 case 0xc8:
90 case 0xd9:
91 return 0x9297;
92 default:
93 return 0;
94 }
95}
96
97#endif
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
deleted file mode 100644
index 2f17654e79a6..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ /dev/null
@@ -1,2878 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28#include "nvc0_graph.h"
29
30static void
31nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
32{
33 nv_wr32(dev, 0x400204, data);
34 nv_wr32(dev, 0x400200, icmd);
35 while (nv_rd32(dev, 0x400700) & 2) {}
36}
37
38static void
39nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
40{
41 nv_wr32(dev, 0x40448c, data);
42 nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
43}
44
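nv_mthd() above injects a method into PGRAPH by packing a trigger bit, the method offset and the object class into the single doorbell word at 0x404488 (nv_icmd() plays the same trick through 0x400200/0x400204). The packing on its own, using a class and method that appear in the 9097 list below:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t class = 0x9097; /* Fermi 3D class */
	uint32_t mthd = 0x0800;  /* first method offset in the list below */

	/* bit 31 = trigger, method shifted up by 14, class in the low bits */
	printf("0x404488 <= 0x%08x\n", 0x80000000u | (mthd << 14) | class);
	return 0;
}
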
45static void
46nvc0_grctx_generate_9097(struct drm_device *dev)
47{
48 u32 fermi = nvc0_graph_class(dev);
49 u32 mthd;
50
51 nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
52 nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
53 nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
54 nv_mthd(dev, 0x9097, 0x08c0, 0x00000000);
55 nv_mthd(dev, 0x9097, 0x0900, 0x00000000);
56 nv_mthd(dev, 0x9097, 0x0940, 0x00000000);
57 nv_mthd(dev, 0x9097, 0x0980, 0x00000000);
58 nv_mthd(dev, 0x9097, 0x09c0, 0x00000000);
59 nv_mthd(dev, 0x9097, 0x0804, 0x00000000);
60 nv_mthd(dev, 0x9097, 0x0844, 0x00000000);
61 nv_mthd(dev, 0x9097, 0x0884, 0x00000000);
62 nv_mthd(dev, 0x9097, 0x08c4, 0x00000000);
63 nv_mthd(dev, 0x9097, 0x0904, 0x00000000);
64 nv_mthd(dev, 0x9097, 0x0944, 0x00000000);
65 nv_mthd(dev, 0x9097, 0x0984, 0x00000000);
66 nv_mthd(dev, 0x9097, 0x09c4, 0x00000000);
67 nv_mthd(dev, 0x9097, 0x0808, 0x00000400);
68 nv_mthd(dev, 0x9097, 0x0848, 0x00000400);
69 nv_mthd(dev, 0x9097, 0x0888, 0x00000400);
70 nv_mthd(dev, 0x9097, 0x08c8, 0x00000400);
71 nv_mthd(dev, 0x9097, 0x0908, 0x00000400);
72 nv_mthd(dev, 0x9097, 0x0948, 0x00000400);
73 nv_mthd(dev, 0x9097, 0x0988, 0x00000400);
74 nv_mthd(dev, 0x9097, 0x09c8, 0x00000400);
75 nv_mthd(dev, 0x9097, 0x080c, 0x00000300);
76 nv_mthd(dev, 0x9097, 0x084c, 0x00000300);
77 nv_mthd(dev, 0x9097, 0x088c, 0x00000300);
78 nv_mthd(dev, 0x9097, 0x08cc, 0x00000300);
79 nv_mthd(dev, 0x9097, 0x090c, 0x00000300);
80 nv_mthd(dev, 0x9097, 0x094c, 0x00000300);
81 nv_mthd(dev, 0x9097, 0x098c, 0x00000300);
82 nv_mthd(dev, 0x9097, 0x09cc, 0x00000300);
83 nv_mthd(dev, 0x9097, 0x0810, 0x000000cf);
84 nv_mthd(dev, 0x9097, 0x0850, 0x00000000);
85 nv_mthd(dev, 0x9097, 0x0890, 0x00000000);
86 nv_mthd(dev, 0x9097, 0x08d0, 0x00000000);
87 nv_mthd(dev, 0x9097, 0x0910, 0x00000000);
88 nv_mthd(dev, 0x9097, 0x0950, 0x00000000);
89 nv_mthd(dev, 0x9097, 0x0990, 0x00000000);
90 nv_mthd(dev, 0x9097, 0x09d0, 0x00000000);
91 nv_mthd(dev, 0x9097, 0x0814, 0x00000040);
92 nv_mthd(dev, 0x9097, 0x0854, 0x00000040);
93 nv_mthd(dev, 0x9097, 0x0894, 0x00000040);
94 nv_mthd(dev, 0x9097, 0x08d4, 0x00000040);
95 nv_mthd(dev, 0x9097, 0x0914, 0x00000040);
96 nv_mthd(dev, 0x9097, 0x0954, 0x00000040);
97 nv_mthd(dev, 0x9097, 0x0994, 0x00000040);
98 nv_mthd(dev, 0x9097, 0x09d4, 0x00000040);
99 nv_mthd(dev, 0x9097, 0x0818, 0x00000001);
100 nv_mthd(dev, 0x9097, 0x0858, 0x00000001);
101 nv_mthd(dev, 0x9097, 0x0898, 0x00000001);
102 nv_mthd(dev, 0x9097, 0x08d8, 0x00000001);
103 nv_mthd(dev, 0x9097, 0x0918, 0x00000001);
104 nv_mthd(dev, 0x9097, 0x0958, 0x00000001);
105 nv_mthd(dev, 0x9097, 0x0998, 0x00000001);
106 nv_mthd(dev, 0x9097, 0x09d8, 0x00000001);
107 nv_mthd(dev, 0x9097, 0x081c, 0x00000000);
108 nv_mthd(dev, 0x9097, 0x085c, 0x00000000);
109 nv_mthd(dev, 0x9097, 0x089c, 0x00000000);
110 nv_mthd(dev, 0x9097, 0x08dc, 0x00000000);
111 nv_mthd(dev, 0x9097, 0x091c, 0x00000000);
112 nv_mthd(dev, 0x9097, 0x095c, 0x00000000);
113 nv_mthd(dev, 0x9097, 0x099c, 0x00000000);
114 nv_mthd(dev, 0x9097, 0x09dc, 0x00000000);
115 nv_mthd(dev, 0x9097, 0x0820, 0x00000000);
116 nv_mthd(dev, 0x9097, 0x0860, 0x00000000);
117 nv_mthd(dev, 0x9097, 0x08a0, 0x00000000);
118 nv_mthd(dev, 0x9097, 0x08e0, 0x00000000);
119 nv_mthd(dev, 0x9097, 0x0920, 0x00000000);
120 nv_mthd(dev, 0x9097, 0x0960, 0x00000000);
121 nv_mthd(dev, 0x9097, 0x09a0, 0x00000000);
122 nv_mthd(dev, 0x9097, 0x09e0, 0x00000000);
123 nv_mthd(dev, 0x9097, 0x2700, 0x00000000);
124 nv_mthd(dev, 0x9097, 0x2720, 0x00000000);
125 nv_mthd(dev, 0x9097, 0x2740, 0x00000000);
126 nv_mthd(dev, 0x9097, 0x2760, 0x00000000);
127 nv_mthd(dev, 0x9097, 0x2780, 0x00000000);
128 nv_mthd(dev, 0x9097, 0x27a0, 0x00000000);
129 nv_mthd(dev, 0x9097, 0x27c0, 0x00000000);
130 nv_mthd(dev, 0x9097, 0x27e0, 0x00000000);
131 nv_mthd(dev, 0x9097, 0x2704, 0x00000000);
132 nv_mthd(dev, 0x9097, 0x2724, 0x00000000);
133 nv_mthd(dev, 0x9097, 0x2744, 0x00000000);
134 nv_mthd(dev, 0x9097, 0x2764, 0x00000000);
135 nv_mthd(dev, 0x9097, 0x2784, 0x00000000);
136 nv_mthd(dev, 0x9097, 0x27a4, 0x00000000);
137 nv_mthd(dev, 0x9097, 0x27c4, 0x00000000);
138 nv_mthd(dev, 0x9097, 0x27e4, 0x00000000);
139 nv_mthd(dev, 0x9097, 0x2708, 0x00000000);
140 nv_mthd(dev, 0x9097, 0x2728, 0x00000000);
141 nv_mthd(dev, 0x9097, 0x2748, 0x00000000);
142 nv_mthd(dev, 0x9097, 0x2768, 0x00000000);
143 nv_mthd(dev, 0x9097, 0x2788, 0x00000000);
144 nv_mthd(dev, 0x9097, 0x27a8, 0x00000000);
145 nv_mthd(dev, 0x9097, 0x27c8, 0x00000000);
146 nv_mthd(dev, 0x9097, 0x27e8, 0x00000000);
147 nv_mthd(dev, 0x9097, 0x270c, 0x00000000);
148 nv_mthd(dev, 0x9097, 0x272c, 0x00000000);
149 nv_mthd(dev, 0x9097, 0x274c, 0x00000000);
150 nv_mthd(dev, 0x9097, 0x276c, 0x00000000);
151 nv_mthd(dev, 0x9097, 0x278c, 0x00000000);
152 nv_mthd(dev, 0x9097, 0x27ac, 0x00000000);
153 nv_mthd(dev, 0x9097, 0x27cc, 0x00000000);
154 nv_mthd(dev, 0x9097, 0x27ec, 0x00000000);
155 nv_mthd(dev, 0x9097, 0x2710, 0x00014000);
156 nv_mthd(dev, 0x9097, 0x2730, 0x00014000);
157 nv_mthd(dev, 0x9097, 0x2750, 0x00014000);
158 nv_mthd(dev, 0x9097, 0x2770, 0x00014000);
159 nv_mthd(dev, 0x9097, 0x2790, 0x00014000);
160 nv_mthd(dev, 0x9097, 0x27b0, 0x00014000);
161 nv_mthd(dev, 0x9097, 0x27d0, 0x00014000);
162 nv_mthd(dev, 0x9097, 0x27f0, 0x00014000);
163 nv_mthd(dev, 0x9097, 0x2714, 0x00000040);
164 nv_mthd(dev, 0x9097, 0x2734, 0x00000040);
165 nv_mthd(dev, 0x9097, 0x2754, 0x00000040);
166 nv_mthd(dev, 0x9097, 0x2774, 0x00000040);
167 nv_mthd(dev, 0x9097, 0x2794, 0x00000040);
168 nv_mthd(dev, 0x9097, 0x27b4, 0x00000040);
169 nv_mthd(dev, 0x9097, 0x27d4, 0x00000040);
170 nv_mthd(dev, 0x9097, 0x27f4, 0x00000040);
171 nv_mthd(dev, 0x9097, 0x1c00, 0x00000000);
172 nv_mthd(dev, 0x9097, 0x1c10, 0x00000000);
173 nv_mthd(dev, 0x9097, 0x1c20, 0x00000000);
174 nv_mthd(dev, 0x9097, 0x1c30, 0x00000000);
175 nv_mthd(dev, 0x9097, 0x1c40, 0x00000000);
176 nv_mthd(dev, 0x9097, 0x1c50, 0x00000000);
177 nv_mthd(dev, 0x9097, 0x1c60, 0x00000000);
178 nv_mthd(dev, 0x9097, 0x1c70, 0x00000000);
179 nv_mthd(dev, 0x9097, 0x1c80, 0x00000000);
180 nv_mthd(dev, 0x9097, 0x1c90, 0x00000000);
181 nv_mthd(dev, 0x9097, 0x1ca0, 0x00000000);
182 nv_mthd(dev, 0x9097, 0x1cb0, 0x00000000);
183 nv_mthd(dev, 0x9097, 0x1cc0, 0x00000000);
184 nv_mthd(dev, 0x9097, 0x1cd0, 0x00000000);
185 nv_mthd(dev, 0x9097, 0x1ce0, 0x00000000);
186 nv_mthd(dev, 0x9097, 0x1cf0, 0x00000000);
187 nv_mthd(dev, 0x9097, 0x1c04, 0x00000000);
188 nv_mthd(dev, 0x9097, 0x1c14, 0x00000000);
189 nv_mthd(dev, 0x9097, 0x1c24, 0x00000000);
190 nv_mthd(dev, 0x9097, 0x1c34, 0x00000000);
191 nv_mthd(dev, 0x9097, 0x1c44, 0x00000000);
192 nv_mthd(dev, 0x9097, 0x1c54, 0x00000000);
193 nv_mthd(dev, 0x9097, 0x1c64, 0x00000000);
194 nv_mthd(dev, 0x9097, 0x1c74, 0x00000000);
195 nv_mthd(dev, 0x9097, 0x1c84, 0x00000000);
196 nv_mthd(dev, 0x9097, 0x1c94, 0x00000000);
197 nv_mthd(dev, 0x9097, 0x1ca4, 0x00000000);
198 nv_mthd(dev, 0x9097, 0x1cb4, 0x00000000);
199 nv_mthd(dev, 0x9097, 0x1cc4, 0x00000000);
200 nv_mthd(dev, 0x9097, 0x1cd4, 0x00000000);
201 nv_mthd(dev, 0x9097, 0x1ce4, 0x00000000);
202 nv_mthd(dev, 0x9097, 0x1cf4, 0x00000000);
203 nv_mthd(dev, 0x9097, 0x1c08, 0x00000000);
204 nv_mthd(dev, 0x9097, 0x1c18, 0x00000000);
205 nv_mthd(dev, 0x9097, 0x1c28, 0x00000000);
206 nv_mthd(dev, 0x9097, 0x1c38, 0x00000000);
207 nv_mthd(dev, 0x9097, 0x1c48, 0x00000000);
208 nv_mthd(dev, 0x9097, 0x1c58, 0x00000000);
209 nv_mthd(dev, 0x9097, 0x1c68, 0x00000000);
210 nv_mthd(dev, 0x9097, 0x1c78, 0x00000000);
211 nv_mthd(dev, 0x9097, 0x1c88, 0x00000000);
212 nv_mthd(dev, 0x9097, 0x1c98, 0x00000000);
213 nv_mthd(dev, 0x9097, 0x1ca8, 0x00000000);
214 nv_mthd(dev, 0x9097, 0x1cb8, 0x00000000);
215 nv_mthd(dev, 0x9097, 0x1cc8, 0x00000000);
216 nv_mthd(dev, 0x9097, 0x1cd8, 0x00000000);
217 nv_mthd(dev, 0x9097, 0x1ce8, 0x00000000);
218 nv_mthd(dev, 0x9097, 0x1cf8, 0x00000000);
219 nv_mthd(dev, 0x9097, 0x1c0c, 0x00000000);
220 nv_mthd(dev, 0x9097, 0x1c1c, 0x00000000);
221 nv_mthd(dev, 0x9097, 0x1c2c, 0x00000000);
222 nv_mthd(dev, 0x9097, 0x1c3c, 0x00000000);
223 nv_mthd(dev, 0x9097, 0x1c4c, 0x00000000);
224 nv_mthd(dev, 0x9097, 0x1c5c, 0x00000000);
225 nv_mthd(dev, 0x9097, 0x1c6c, 0x00000000);
226 nv_mthd(dev, 0x9097, 0x1c7c, 0x00000000);
227 nv_mthd(dev, 0x9097, 0x1c8c, 0x00000000);
228 nv_mthd(dev, 0x9097, 0x1c9c, 0x00000000);
229 nv_mthd(dev, 0x9097, 0x1cac, 0x00000000);
230 nv_mthd(dev, 0x9097, 0x1cbc, 0x00000000);
231 nv_mthd(dev, 0x9097, 0x1ccc, 0x00000000);
232 nv_mthd(dev, 0x9097, 0x1cdc, 0x00000000);
233 nv_mthd(dev, 0x9097, 0x1cec, 0x00000000);
234 nv_mthd(dev, 0x9097, 0x1cfc, 0x00000000);
235 nv_mthd(dev, 0x9097, 0x1d00, 0x00000000);
236 nv_mthd(dev, 0x9097, 0x1d10, 0x00000000);
237 nv_mthd(dev, 0x9097, 0x1d20, 0x00000000);
238 nv_mthd(dev, 0x9097, 0x1d30, 0x00000000);
239 nv_mthd(dev, 0x9097, 0x1d40, 0x00000000);
240 nv_mthd(dev, 0x9097, 0x1d50, 0x00000000);
241 nv_mthd(dev, 0x9097, 0x1d60, 0x00000000);
242 nv_mthd(dev, 0x9097, 0x1d70, 0x00000000);
243 nv_mthd(dev, 0x9097, 0x1d80, 0x00000000);
244 nv_mthd(dev, 0x9097, 0x1d90, 0x00000000);
245 nv_mthd(dev, 0x9097, 0x1da0, 0x00000000);
246 nv_mthd(dev, 0x9097, 0x1db0, 0x00000000);
247 nv_mthd(dev, 0x9097, 0x1dc0, 0x00000000);
248 nv_mthd(dev, 0x9097, 0x1dd0, 0x00000000);
249 nv_mthd(dev, 0x9097, 0x1de0, 0x00000000);
250 nv_mthd(dev, 0x9097, 0x1df0, 0x00000000);
251 nv_mthd(dev, 0x9097, 0x1d04, 0x00000000);
252 nv_mthd(dev, 0x9097, 0x1d14, 0x00000000);
253 nv_mthd(dev, 0x9097, 0x1d24, 0x00000000);
254 nv_mthd(dev, 0x9097, 0x1d34, 0x00000000);
255 nv_mthd(dev, 0x9097, 0x1d44, 0x00000000);
256 nv_mthd(dev, 0x9097, 0x1d54, 0x00000000);
257 nv_mthd(dev, 0x9097, 0x1d64, 0x00000000);
258 nv_mthd(dev, 0x9097, 0x1d74, 0x00000000);
259 nv_mthd(dev, 0x9097, 0x1d84, 0x00000000);
260 nv_mthd(dev, 0x9097, 0x1d94, 0x00000000);
261 nv_mthd(dev, 0x9097, 0x1da4, 0x00000000);
262 nv_mthd(dev, 0x9097, 0x1db4, 0x00000000);
263 nv_mthd(dev, 0x9097, 0x1dc4, 0x00000000);
264 nv_mthd(dev, 0x9097, 0x1dd4, 0x00000000);
265 nv_mthd(dev, 0x9097, 0x1de4, 0x00000000);
266 nv_mthd(dev, 0x9097, 0x1df4, 0x00000000);
267 nv_mthd(dev, 0x9097, 0x1d08, 0x00000000);
268 nv_mthd(dev, 0x9097, 0x1d18, 0x00000000);
269 nv_mthd(dev, 0x9097, 0x1d28, 0x00000000);
270 nv_mthd(dev, 0x9097, 0x1d38, 0x00000000);
271 nv_mthd(dev, 0x9097, 0x1d48, 0x00000000);
272 nv_mthd(dev, 0x9097, 0x1d58, 0x00000000);
273 nv_mthd(dev, 0x9097, 0x1d68, 0x00000000);
274 nv_mthd(dev, 0x9097, 0x1d78, 0x00000000);
275 nv_mthd(dev, 0x9097, 0x1d88, 0x00000000);
276 nv_mthd(dev, 0x9097, 0x1d98, 0x00000000);
277 nv_mthd(dev, 0x9097, 0x1da8, 0x00000000);
278 nv_mthd(dev, 0x9097, 0x1db8, 0x00000000);
279 nv_mthd(dev, 0x9097, 0x1dc8, 0x00000000);
280 nv_mthd(dev, 0x9097, 0x1dd8, 0x00000000);
281 nv_mthd(dev, 0x9097, 0x1de8, 0x00000000);
282 nv_mthd(dev, 0x9097, 0x1df8, 0x00000000);
283 nv_mthd(dev, 0x9097, 0x1d0c, 0x00000000);
284 nv_mthd(dev, 0x9097, 0x1d1c, 0x00000000);
285 nv_mthd(dev, 0x9097, 0x1d2c, 0x00000000);
286 nv_mthd(dev, 0x9097, 0x1d3c, 0x00000000);
287 nv_mthd(dev, 0x9097, 0x1d4c, 0x00000000);
288 nv_mthd(dev, 0x9097, 0x1d5c, 0x00000000);
289 nv_mthd(dev, 0x9097, 0x1d6c, 0x00000000);
290 nv_mthd(dev, 0x9097, 0x1d7c, 0x00000000);
291 nv_mthd(dev, 0x9097, 0x1d8c, 0x00000000);
292 nv_mthd(dev, 0x9097, 0x1d9c, 0x00000000);
293 nv_mthd(dev, 0x9097, 0x1dac, 0x00000000);
294 nv_mthd(dev, 0x9097, 0x1dbc, 0x00000000);
295 nv_mthd(dev, 0x9097, 0x1dcc, 0x00000000);
296 nv_mthd(dev, 0x9097, 0x1ddc, 0x00000000);
297 nv_mthd(dev, 0x9097, 0x1dec, 0x00000000);
298 nv_mthd(dev, 0x9097, 0x1dfc, 0x00000000);
299 nv_mthd(dev, 0x9097, 0x1f00, 0x00000000);
300 nv_mthd(dev, 0x9097, 0x1f08, 0x00000000);
301 nv_mthd(dev, 0x9097, 0x1f10, 0x00000000);
302 nv_mthd(dev, 0x9097, 0x1f18, 0x00000000);
303 nv_mthd(dev, 0x9097, 0x1f20, 0x00000000);
304 nv_mthd(dev, 0x9097, 0x1f28, 0x00000000);
305 nv_mthd(dev, 0x9097, 0x1f30, 0x00000000);
306 nv_mthd(dev, 0x9097, 0x1f38, 0x00000000);
307 nv_mthd(dev, 0x9097, 0x1f40, 0x00000000);
308 nv_mthd(dev, 0x9097, 0x1f48, 0x00000000);
309 nv_mthd(dev, 0x9097, 0x1f50, 0x00000000);
310 nv_mthd(dev, 0x9097, 0x1f58, 0x00000000);
311 nv_mthd(dev, 0x9097, 0x1f60, 0x00000000);
312 nv_mthd(dev, 0x9097, 0x1f68, 0x00000000);
313 nv_mthd(dev, 0x9097, 0x1f70, 0x00000000);
314 nv_mthd(dev, 0x9097, 0x1f78, 0x00000000);
315 nv_mthd(dev, 0x9097, 0x1f04, 0x00000000);
316 nv_mthd(dev, 0x9097, 0x1f0c, 0x00000000);
317 nv_mthd(dev, 0x9097, 0x1f14, 0x00000000);
318 nv_mthd(dev, 0x9097, 0x1f1c, 0x00000000);
319 nv_mthd(dev, 0x9097, 0x1f24, 0x00000000);
320 nv_mthd(dev, 0x9097, 0x1f2c, 0x00000000);
321 nv_mthd(dev, 0x9097, 0x1f34, 0x00000000);
322 nv_mthd(dev, 0x9097, 0x1f3c, 0x00000000);
323 nv_mthd(dev, 0x9097, 0x1f44, 0x00000000);
324 nv_mthd(dev, 0x9097, 0x1f4c, 0x00000000);
325 nv_mthd(dev, 0x9097, 0x1f54, 0x00000000);
326 nv_mthd(dev, 0x9097, 0x1f5c, 0x00000000);
327 nv_mthd(dev, 0x9097, 0x1f64, 0x00000000);
328 nv_mthd(dev, 0x9097, 0x1f6c, 0x00000000);
329 nv_mthd(dev, 0x9097, 0x1f74, 0x00000000);
330 nv_mthd(dev, 0x9097, 0x1f7c, 0x00000000);
331 nv_mthd(dev, 0x9097, 0x1f80, 0x00000000);
332 nv_mthd(dev, 0x9097, 0x1f88, 0x00000000);
333 nv_mthd(dev, 0x9097, 0x1f90, 0x00000000);
334 nv_mthd(dev, 0x9097, 0x1f98, 0x00000000);
335 nv_mthd(dev, 0x9097, 0x1fa0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fa8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fb0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fb8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fc0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fc8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fd0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fd8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fe0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fe8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1ff0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1ff8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1f84, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1f8c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1f94, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1f9c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fa4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fac, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fb4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fbc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fc4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fcc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fd4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fdc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fe4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1fec, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1ff4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1ffc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2200, 0x00000022);
	nv_mthd(dev, 0x9097, 0x2210, 0x00000022);
	nv_mthd(dev, 0x9097, 0x2220, 0x00000022);
	nv_mthd(dev, 0x9097, 0x2230, 0x00000022);
	nv_mthd(dev, 0x9097, 0x2240, 0x00000022);
	nv_mthd(dev, 0x9097, 0x2000, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2040, 0x00000011);
	nv_mthd(dev, 0x9097, 0x2080, 0x00000020);
	nv_mthd(dev, 0x9097, 0x20c0, 0x00000030);
	nv_mthd(dev, 0x9097, 0x2100, 0x00000040);
	nv_mthd(dev, 0x9097, 0x2140, 0x00000051);
	nv_mthd(dev, 0x9097, 0x200c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x204c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x208c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x20cc, 0x00000001);
	nv_mthd(dev, 0x9097, 0x210c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x214c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x2010, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2050, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2090, 0x00000001);
	nv_mthd(dev, 0x9097, 0x20d0, 0x00000002);
	nv_mthd(dev, 0x9097, 0x2110, 0x00000003);
	nv_mthd(dev, 0x9097, 0x2150, 0x00000004);
	nv_mthd(dev, 0x9097, 0x0380, 0x00000000);
	nv_mthd(dev, 0x9097, 0x03a0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x03c0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x03e0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0384, 0x00000000);
	nv_mthd(dev, 0x9097, 0x03a4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x03c4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x03e4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0388, 0x00000000);
	nv_mthd(dev, 0x9097, 0x03a8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x03c8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x03e8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x038c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x03ac, 0x00000000);
	nv_mthd(dev, 0x9097, 0x03cc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x03ec, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0700, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0710, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0720, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0730, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0704, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0714, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0724, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0734, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0708, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0718, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0728, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0738, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2800, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2804, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2808, 0x00000000);
	nv_mthd(dev, 0x9097, 0x280c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2810, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2814, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2818, 0x00000000);
	nv_mthd(dev, 0x9097, 0x281c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2820, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2824, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2828, 0x00000000);
	nv_mthd(dev, 0x9097, 0x282c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2830, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2834, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2838, 0x00000000);
	nv_mthd(dev, 0x9097, 0x283c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2840, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2844, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2848, 0x00000000);
	nv_mthd(dev, 0x9097, 0x284c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2850, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2854, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2858, 0x00000000);
	nv_mthd(dev, 0x9097, 0x285c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2860, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2864, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2868, 0x00000000);
	nv_mthd(dev, 0x9097, 0x286c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2870, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2874, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2878, 0x00000000);
	nv_mthd(dev, 0x9097, 0x287c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2880, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2884, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2888, 0x00000000);
	nv_mthd(dev, 0x9097, 0x288c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2890, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2894, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2898, 0x00000000);
	nv_mthd(dev, 0x9097, 0x289c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28a0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28a4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28a8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28ac, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28b0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28b4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28b8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28bc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28c0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28c4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28c8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28cc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28d0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28d4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28d8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28dc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28e0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28e4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28e8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28ec, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28f0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28f4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28f8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x28fc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2900, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2904, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2908, 0x00000000);
	nv_mthd(dev, 0x9097, 0x290c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2910, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2914, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2918, 0x00000000);
	nv_mthd(dev, 0x9097, 0x291c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2920, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2924, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2928, 0x00000000);
	nv_mthd(dev, 0x9097, 0x292c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2930, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2934, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2938, 0x00000000);
	nv_mthd(dev, 0x9097, 0x293c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2940, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2944, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2948, 0x00000000);
	nv_mthd(dev, 0x9097, 0x294c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2950, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2954, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2958, 0x00000000);
	nv_mthd(dev, 0x9097, 0x295c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2960, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2964, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2968, 0x00000000);
	nv_mthd(dev, 0x9097, 0x296c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2970, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2974, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2978, 0x00000000);
	nv_mthd(dev, 0x9097, 0x297c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2980, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2984, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2988, 0x00000000);
	nv_mthd(dev, 0x9097, 0x298c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2990, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2994, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2998, 0x00000000);
	nv_mthd(dev, 0x9097, 0x299c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29a0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29a4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29a8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29ac, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29b0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29b4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29b8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29bc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29c0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29c4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29c8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29cc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29d0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29d4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29d8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29dc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29e0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29e4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29e8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29ec, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29f0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29f4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29f8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x29fc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a00, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a20, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a40, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a60, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a80, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0aa0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ac0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ae0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b00, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b20, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b40, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b60, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b80, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ba0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0bc0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0be0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a04, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a24, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a44, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a64, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a84, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0aa4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ac4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ae4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b04, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b24, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b44, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b64, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b84, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ba4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0bc4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0be4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a08, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a28, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a48, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a68, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a88, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0aa8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ac8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ae8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b08, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b28, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b48, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b68, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b88, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ba8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0bc8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0be8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a0c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a2c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a4c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a6c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a8c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0aac, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0acc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0aec, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b0c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b2c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b4c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b6c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b8c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0bac, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0bcc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0bec, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a10, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a30, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a50, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a70, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a90, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ab0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ad0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0af0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b10, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b30, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b50, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b70, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b90, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0bb0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0bd0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0bf0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a14, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a34, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a54, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a74, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0a94, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ab4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ad4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0af4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b14, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b34, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b54, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b74, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0b94, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0bb4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0bd4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0bf4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c00, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c10, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c20, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c30, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c40, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c50, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c60, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c70, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c80, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c90, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ca0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0cb0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0cc0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0cd0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ce0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0cf0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c04, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c14, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c24, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c34, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c44, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c54, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c64, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c74, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c84, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c94, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ca4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0cb4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0cc4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0cd4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ce4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0cf4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c08, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c18, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c28, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c38, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c48, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c58, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c68, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c78, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c88, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c98, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ca8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0cb8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0cc8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0cd8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ce8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0cf8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0c0c, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0c1c, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0c2c, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0c3c, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0c4c, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0c5c, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0c6c, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0c7c, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0c8c, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0c9c, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0cac, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0cbc, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0ccc, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0cdc, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0cec, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0cfc, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0d00, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d08, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d10, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d18, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d20, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d28, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d30, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d38, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d04, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d0c, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d14, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d1c, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d24, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d2c, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d34, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d3c, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e00, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0e10, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0e20, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0e30, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0e40, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0e50, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0e60, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0e70, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0e80, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0e90, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ea0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0eb0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ec0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ed0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ee0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ef0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0e04, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e14, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e24, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e34, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e44, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e54, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e64, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e74, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e84, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e94, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0ea4, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0eb4, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0ec4, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0ed4, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0ee4, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0ef4, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e08, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e18, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e28, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e38, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e48, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e58, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e68, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e78, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e88, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0e98, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0ea8, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0eb8, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0ec8, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0ed8, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0ee8, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0ef8, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d40, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0d48, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0d50, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0d58, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0d44, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0d4c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0d54, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0d5c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1e00, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e20, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e40, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e60, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e80, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1ea0, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1ec0, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1ee0, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e04, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e24, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e44, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e64, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e84, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1ea4, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1ec4, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1ee4, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e08, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1e28, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1e48, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1e68, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1e88, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1ea8, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1ec8, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1ee8, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1e0c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e2c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e4c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e6c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e8c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1eac, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1ecc, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1eec, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e10, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e30, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e50, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e70, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e90, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1eb0, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1ed0, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1ef0, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e14, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1e34, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1e54, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1e74, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1e94, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1eb4, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1ed4, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1ef4, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1e18, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e38, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e58, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e78, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1e98, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
	if (fermi == 0x9097) {
		for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
			nv_mthd(dev, 0x9097, mthd, 0x00000000);
	}
	nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0d68, 0x0000ffff);
	nv_mthd(dev, 0x9097, 0x121c, 0x0fac6881);
	nv_mthd(dev, 0x9097, 0x0fac, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1538, 0x00000001);
	nv_mthd(dev, 0x9097, 0x0fe0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0fe4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0fe8, 0x00000014);
	nv_mthd(dev, 0x9097, 0x0fec, 0x00000040);
	nv_mthd(dev, 0x9097, 0x0ff0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x179c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1228, 0x00000400);
	nv_mthd(dev, 0x9097, 0x122c, 0x00000300);
	nv_mthd(dev, 0x9097, 0x1230, 0x00010001);
	nv_mthd(dev, 0x9097, 0x07f8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x15b4, 0x00000001);
	nv_mthd(dev, 0x9097, 0x15cc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1534, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0fb0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x15d0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x153c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x16b4, 0x00000003);
	nv_mthd(dev, 0x9097, 0x0fbc, 0x0000ffff);
	nv_mthd(dev, 0x9097, 0x0fc0, 0x0000ffff);
	nv_mthd(dev, 0x9097, 0x0fc4, 0x0000ffff);
	nv_mthd(dev, 0x9097, 0x0fc8, 0x0000ffff);
	nv_mthd(dev, 0x9097, 0x0df8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0dfc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1948, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1970, 0x00000001);
	nv_mthd(dev, 0x9097, 0x161c, 0x000009f0);
	nv_mthd(dev, 0x9097, 0x0dcc, 0x00000010);
	nv_mthd(dev, 0x9097, 0x163c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x15e4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1160, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x1164, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x1168, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x116c, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x1170, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x1174, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x1178, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x117c, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x1180, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x1184, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x1188, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x118c, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x1190, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x1194, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x1198, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x119c, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11a0, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11a4, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11a8, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11ac, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11b0, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11b4, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11b8, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11bc, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11c0, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11c4, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11c8, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11cc, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11d0, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11d4, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11d8, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x11dc, 0x25e00040);
	nv_mthd(dev, 0x9097, 0x1880, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1884, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1888, 0x00000000);
	nv_mthd(dev, 0x9097, 0x188c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1890, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1894, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1898, 0x00000000);
	nv_mthd(dev, 0x9097, 0x189c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18a0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18a4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18a8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18ac, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18b0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18b4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18b8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18bc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18c0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18c4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18c8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18cc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18d0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18d4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18d8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18dc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18e0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18e4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18e8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18ec, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18f0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18f4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18f8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x18fc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0f84, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0f88, 0x00000000);
	nv_mthd(dev, 0x9097, 0x17c8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x17cc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x17d0, 0x000000ff);
	nv_mthd(dev, 0x9097, 0x17d4, 0xffffffff);
	nv_mthd(dev, 0x9097, 0x17d8, 0x00000002);
	nv_mthd(dev, 0x9097, 0x17dc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x15f4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x15f8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1434, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1438, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0d74, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0dec, 0x00000001);
	nv_mthd(dev, 0x9097, 0x13a4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1318, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1644, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0748, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0de8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1648, 0x00000000);
	nv_mthd(dev, 0x9097, 0x12a4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1120, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1124, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1128, 0x00000000);
	nv_mthd(dev, 0x9097, 0x112c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1118, 0x00000000);
	nv_mthd(dev, 0x9097, 0x164c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1658, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1910, 0x00000290);
	nv_mthd(dev, 0x9097, 0x1518, 0x00000000);
	nv_mthd(dev, 0x9097, 0x165c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1520, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1604, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1570, 0x00000000);
	nv_mthd(dev, 0x9097, 0x13b0, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x13b4, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x020c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1670, 0x30201000);
	nv_mthd(dev, 0x9097, 0x1674, 0x70605040);
	nv_mthd(dev, 0x9097, 0x1678, 0xb8a89888);
	nv_mthd(dev, 0x9097, 0x167c, 0xf8e8d8c8);
	nv_mthd(dev, 0x9097, 0x166c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1680, 0x00ffff00);
	nv_mthd(dev, 0x9097, 0x12d0, 0x00000003);
	nv_mthd(dev, 0x9097, 0x12d4, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1684, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1688, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0dac, 0x00001b02);
	nv_mthd(dev, 0x9097, 0x0db0, 0x00001b02);
	nv_mthd(dev, 0x9097, 0x0db4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x168c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x15bc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x156c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x187c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1110, 0x00000001);
	nv_mthd(dev, 0x9097, 0x0dc0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0dc4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0dc8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1234, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1690, 0x00000000);
	nv_mthd(dev, 0x9097, 0x12ac, 0x00000001);
	nv_mthd(dev, 0x9097, 0x02c4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0790, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0794, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0798, 0x00000000);
	nv_mthd(dev, 0x9097, 0x079c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x07a0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x077c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1000, 0x00000010);
	nv_mthd(dev, 0x9097, 0x10fc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1290, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0218, 0x00000010);
	nv_mthd(dev, 0x9097, 0x12d8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x12dc, 0x00000010);
	nv_mthd(dev, 0x9097, 0x0d94, 0x00000001);
	nv_mthd(dev, 0x9097, 0x155c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1560, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1564, 0x00001fff);
	nv_mthd(dev, 0x9097, 0x1574, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1578, 0x00000000);
	nv_mthd(dev, 0x9097, 0x157c, 0x003fffff);
	nv_mthd(dev, 0x9097, 0x1354, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1664, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1610, 0x00000012);
	nv_mthd(dev, 0x9097, 0x1608, 0x00000000);
	nv_mthd(dev, 0x9097, 0x160c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x162c, 0x00000003);
	nv_mthd(dev, 0x9097, 0x0210, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0320, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0324, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0328, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x032c, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0330, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0334, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0338, 0x3f800000);
	nv_mthd(dev, 0x9097, 0x0750, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0760, 0x39291909);
	nv_mthd(dev, 0x9097, 0x0764, 0x79695949);
	nv_mthd(dev, 0x9097, 0x0768, 0xb9a99989);
	nv_mthd(dev, 0x9097, 0x076c, 0xf9e9d9c9);
	nv_mthd(dev, 0x9097, 0x0770, 0x30201000);
	nv_mthd(dev, 0x9097, 0x0774, 0x70605040);
	nv_mthd(dev, 0x9097, 0x0778, 0x00009080);
	nv_mthd(dev, 0x9097, 0x0780, 0x39291909);
	nv_mthd(dev, 0x9097, 0x0784, 0x79695949);
	nv_mthd(dev, 0x9097, 0x0788, 0xb9a99989);
	nv_mthd(dev, 0x9097, 0x078c, 0xf9e9d9c9);
	nv_mthd(dev, 0x9097, 0x07d0, 0x30201000);
	nv_mthd(dev, 0x9097, 0x07d4, 0x70605040);
	nv_mthd(dev, 0x9097, 0x07d8, 0x00009080);
	nv_mthd(dev, 0x9097, 0x037c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x0740, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0744, 0x00000000);
	nv_mthd(dev, 0x9097, 0x2600, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1918, 0x00000000);
	nv_mthd(dev, 0x9097, 0x191c, 0x00000900);
	nv_mthd(dev, 0x9097, 0x1920, 0x00000405);
	nv_mthd(dev, 0x9097, 0x1308, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1924, 0x00000000);
	nv_mthd(dev, 0x9097, 0x13ac, 0x00000000);
	nv_mthd(dev, 0x9097, 0x192c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x193c, 0x00002c1c);
	nv_mthd(dev, 0x9097, 0x0d7c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0f8c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x02c0, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1510, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1940, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ff4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0ff8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x194c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1950, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1968, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1590, 0x0000003f);
	nv_mthd(dev, 0x9097, 0x07e8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x07ec, 0x00000000);
	nv_mthd(dev, 0x9097, 0x07f0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x07f4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x196c, 0x00000011);
	nv_mthd(dev, 0x9097, 0x197c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0fcc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0fd0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x02d8, 0x00000040);
	nv_mthd(dev, 0x9097, 0x1980, 0x00000080);
	nv_mthd(dev, 0x9097, 0x1504, 0x00000080);
	nv_mthd(dev, 0x9097, 0x1984, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0300, 0x00000001);
	nv_mthd(dev, 0x9097, 0x13a8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x12ec, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1310, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1314, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1380, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1384, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1388, 0x00000001);
	nv_mthd(dev, 0x9097, 0x138c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1390, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1394, 0x00000000);
	nv_mthd(dev, 0x9097, 0x139c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1398, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1594, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1598, 0x00000001);
	nv_mthd(dev, 0x9097, 0x159c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x15a0, 0x00000001);
	nv_mthd(dev, 0x9097, 0x15a4, 0x00000001);
	nv_mthd(dev, 0x9097, 0x0f54, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0f58, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0f5c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x19bc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0f9c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0fa0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x12cc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x12e8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x130c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1360, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1364, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1368, 0x00000000);
	nv_mthd(dev, 0x9097, 0x136c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1370, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1374, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1378, 0x00000000);
	nv_mthd(dev, 0x9097, 0x137c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x133c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1340, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1344, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1348, 0x00000001);
	nv_mthd(dev, 0x9097, 0x134c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1350, 0x00000002);
	nv_mthd(dev, 0x9097, 0x1358, 0x00000001);
	nv_mthd(dev, 0x9097, 0x12e4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x131c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1320, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1324, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1328, 0x00000000);
	nv_mthd(dev, 0x9097, 0x19c0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1140, 0x00000000);
	nv_mthd(dev, 0x9097, 0x19c4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x19c8, 0x00001500);
	nv_mthd(dev, 0x9097, 0x135c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0f90, 0x00000000);
	nv_mthd(dev, 0x9097, 0x19e0, 0x00000001);
	nv_mthd(dev, 0x9097, 0x19e4, 0x00000001);
	nv_mthd(dev, 0x9097, 0x19e8, 0x00000001);
	nv_mthd(dev, 0x9097, 0x19ec, 0x00000001);
	nv_mthd(dev, 0x9097, 0x19f0, 0x00000001);
	nv_mthd(dev, 0x9097, 0x19f4, 0x00000001);
	nv_mthd(dev, 0x9097, 0x19f8, 0x00000001);
	nv_mthd(dev, 0x9097, 0x19fc, 0x00000001);
	nv_mthd(dev, 0x9097, 0x19cc, 0x00000001);
	nv_mthd(dev, 0x9097, 0x15b8, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1a00, 0x00001111);
	nv_mthd(dev, 0x9097, 0x1a04, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1a08, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1a0c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1a10, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1a14, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1a18, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1a1c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0d6c, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x0d70, 0xffff0000);
	nv_mthd(dev, 0x9097, 0x10f8, 0x00001010);
	nv_mthd(dev, 0x9097, 0x0d80, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0d84, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0d88, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0d8c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0d90, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0da0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1508, 0x80000000);
	nv_mthd(dev, 0x9097, 0x150c, 0x40000000);
	nv_mthd(dev, 0x9097, 0x1668, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0318, 0x00000008);
	nv_mthd(dev, 0x9097, 0x031c, 0x00000008);
	nv_mthd(dev, 0x9097, 0x0d9c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x07dc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x074c, 0x00000055);
	nv_mthd(dev, 0x9097, 0x1420, 0x00000003);
	nv_mthd(dev, 0x9097, 0x17bc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x17c0, 0x00000000);
	nv_mthd(dev, 0x9097, 0x17c4, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1008, 0x00000008);
	nv_mthd(dev, 0x9097, 0x100c, 0x00000040);
	nv_mthd(dev, 0x9097, 0x1010, 0x0000012c);
	nv_mthd(dev, 0x9097, 0x0d60, 0x00000040);
	nv_mthd(dev, 0x9097, 0x075c, 0x00000003);
	nv_mthd(dev, 0x9097, 0x1018, 0x00000020);
	nv_mthd(dev, 0x9097, 0x101c, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1020, 0x00000020);
	nv_mthd(dev, 0x9097, 0x1024, 0x00000001);
	nv_mthd(dev, 0x9097, 0x1444, 0x00000000);
	nv_mthd(dev, 0x9097, 0x1448, 0x00000000);
	nv_mthd(dev, 0x9097, 0x144c, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0360, 0x20164010);
	nv_mthd(dev, 0x9097, 0x0364, 0x00000020);
	nv_mthd(dev, 0x9097, 0x0368, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0de4, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0204, 0x00000006);
	nv_mthd(dev, 0x9097, 0x0208, 0x00000000);
	nv_mthd(dev, 0x9097, 0x02cc, 0x003fffff);
	nv_mthd(dev, 0x9097, 0x02d0, 0x00000c48);
	nv_mthd(dev, 0x9097, 0x1220, 0x00000005);
	nv_mthd(dev, 0x9097, 0x0fdc, 0x00000000);
	nv_mthd(dev, 0x9097, 0x0f98, 0x00300008);
	nv_mthd(dev, 0x9097, 0x1284, 0x04000080);
	nv_mthd(dev, 0x9097, 0x1450, 0x00300008);
	nv_mthd(dev, 0x9097, 0x1454, 0x04000080);
	nv_mthd(dev, 0x9097, 0x0214, 0x00000000);
	/* in the mmio trace this write appears right after the 0x90c0
	 * methods, not at this point */
	nv_mthd(dev, 0x9097, 0x3410, 0x80002006);
}

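/* Default method state for the 0x9197 3D class variant; nvc0_graph_class()
 * presumably only reports this class on the later Fermi chips that expose
 * it, hence the guard around the 0x3400 block.
 */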
static void
nvc0_grctx_generate_9197(struct drm_device *dev)
{
	u32 fermi = nvc0_graph_class(dev);
	u32 mthd;

	if (fermi == 0x9197) {
		for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
			nv_mthd(dev, 0x9197, mthd, 0x00000000);
	}
	nv_mthd(dev, 0x9197, 0x02e4, 0x0000b001);
}

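/* Default method state for the 0x9297 3D class variant (the newest of the
 * three Fermi 3D classes handled here).
 */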
static void
nvc0_grctx_generate_9297(struct drm_device *dev)
{
	u32 fermi = nvc0_graph_class(dev);
	u32 mthd;

	if (fermi == 0x9297) {
		for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
			nv_mthd(dev, 0x9297, mthd, 0x00000000);
	}
	nv_mthd(dev, 0x9297, 0x036c, 0x00000000);
	nv_mthd(dev, 0x9297, 0x0370, 0x00000000);
	nv_mthd(dev, 0x9297, 0x07a4, 0x00000000);
	nv_mthd(dev, 0x9297, 0x07a8, 0x00000000);
	nv_mthd(dev, 0x9297, 0x0374, 0x00000000);
	nv_mthd(dev, 0x9297, 0x0378, 0x00000020);
}

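/* Default method state for the 0x902d 2D class. */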
static void
nvc0_grctx_generate_902d(struct drm_device *dev)
{
	nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
	nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
	nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
	nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
	nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
	nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
	nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
	nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
	nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
	nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
	nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
	nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
	nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
}

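/* Default method state for the 0x9039 M2MF (memory-to-memory format)
 * class.
 */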
static void
nvc0_grctx_generate_9039(struct drm_device *dev)
{
	nv_mthd(dev, 0x9039, 0x030c, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0310, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0314, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0320, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0238, 0x00000000);
	nv_mthd(dev, 0x9039, 0x023c, 0x00000000);
	nv_mthd(dev, 0x9039, 0x0318, 0x00000000);
	nv_mthd(dev, 0x9039, 0x031c, 0x00000000);
}

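/* Default method state for the 0x90c0 compute class; the extra banks of
 * 0x27xx methods only exist on GF119 (chipset 0xd9), hence the guarded
 * loops.
 */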
static void
nvc0_grctx_generate_90c0(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
		nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
		nv_mthd(dev, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
		nv_mthd(dev, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
		nv_mthd(dev, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
		nv_mthd(dev, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
		nv_mthd(dev, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
	}
	nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x276c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x278c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
	for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
		nv_mthd(dev, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
		nv_mthd(dev, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
		nv_mthd(dev, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
		nv_mthd(dev, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
	}
	nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
	nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
	nv_mthd(dev, 0x90c0, 0x02c4, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0790, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0794, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0798, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x079c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x07a0, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x077c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0204, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0208, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x020c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0214, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x024c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x0d94, 0x00000001);
	nv_mthd(dev, 0x90c0, 0x1608, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x160c, 0x00000000);
	nv_mthd(dev, 0x90c0, 0x1664, 0x00000000);
}

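/* From here on the initial state is loaded with raw register writes rather
 * than class methods: each helper programs one PGRAPH sub-unit with the
 * default values the context image is built from (presumably captured from
 * mmio traces of the binary driver, as the comment above suggests).
 */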
static void
nvc0_grctx_generate_dispatch(struct drm_device *dev)
{
	int i;

	nv_wr32(dev, 0x404004, 0x00000000);
	nv_wr32(dev, 0x404008, 0x00000000);
	nv_wr32(dev, 0x40400c, 0x00000000);
	nv_wr32(dev, 0x404010, 0x00000000);
	nv_wr32(dev, 0x404014, 0x00000000);
	nv_wr32(dev, 0x404018, 0x00000000);
	nv_wr32(dev, 0x40401c, 0x00000000);
	nv_wr32(dev, 0x404020, 0x00000000);
	nv_wr32(dev, 0x404024, 0x00000000);
	nv_wr32(dev, 0x404028, 0x00000000);
	nv_wr32(dev, 0x40402c, 0x00000000);
	nv_wr32(dev, 0x404044, 0x00000000);
	nv_wr32(dev, 0x404094, 0x00000000);
	nv_wr32(dev, 0x404098, 0x00000000);
	nv_wr32(dev, 0x40409c, 0x00000000);
	nv_wr32(dev, 0x4040a0, 0x00000000);
	nv_wr32(dev, 0x4040a4, 0x00000000);
	nv_wr32(dev, 0x4040a8, 0x00000000);
	nv_wr32(dev, 0x4040ac, 0x00000000);
	nv_wr32(dev, 0x4040b0, 0x00000000);
	nv_wr32(dev, 0x4040b4, 0x00000000);
	nv_wr32(dev, 0x4040b8, 0x00000000);
	nv_wr32(dev, 0x4040bc, 0x00000000);
	nv_wr32(dev, 0x4040c0, 0x00000000);
	nv_wr32(dev, 0x4040c4, 0x00000000);
	nv_wr32(dev, 0x4040c8, 0xf0000087);
	nv_wr32(dev, 0x4040d4, 0x00000000);
	nv_wr32(dev, 0x4040d8, 0x00000000);
	nv_wr32(dev, 0x4040dc, 0x00000000);
	nv_wr32(dev, 0x4040e0, 0x00000000);
	nv_wr32(dev, 0x4040e4, 0x00000000);
	nv_wr32(dev, 0x4040e8, 0x00001000);
	nv_wr32(dev, 0x4040f8, 0x00000000);
	nv_wr32(dev, 0x404130, 0x00000000);
	nv_wr32(dev, 0x404134, 0x00000000);
	nv_wr32(dev, 0x404138, 0x20000040);
	nv_wr32(dev, 0x404150, 0x0000002e);
	nv_wr32(dev, 0x404154, 0x00000400);
	nv_wr32(dev, 0x404158, 0x00000200);
	nv_wr32(dev, 0x404164, 0x00000055);
	nv_wr32(dev, 0x404168, 0x00000000);
	nv_wr32(dev, 0x404174, 0x00000000);
	nv_wr32(dev, 0x404178, 0x00000000);
	nv_wr32(dev, 0x40417c, 0x00000000);
	for (i = 0; i < 8; i++)
		nv_wr32(dev, 0x404200 + (i * 4), 0x00000000); /* subc */
}

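/* Macro unit defaults (0x4044xx). */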
static void
nvc0_grctx_generate_macro(struct drm_device *dev)
{
	nv_wr32(dev, 0x404404, 0x00000000);
	nv_wr32(dev, 0x404408, 0x00000000);
	nv_wr32(dev, 0x40440c, 0x00000000);
	nv_wr32(dev, 0x404410, 0x00000000);
	nv_wr32(dev, 0x404414, 0x00000000);
	nv_wr32(dev, 0x404418, 0x00000000);
	nv_wr32(dev, 0x40441c, 0x00000000);
	nv_wr32(dev, 0x404420, 0x00000000);
	nv_wr32(dev, 0x404424, 0x00000000);
	nv_wr32(dev, 0x404428, 0x00000000);
	nv_wr32(dev, 0x40442c, 0x00000000);
	nv_wr32(dev, 0x404430, 0x00000000);
	nv_wr32(dev, 0x404434, 0x00000000);
	nv_wr32(dev, 0x404438, 0x00000000);
	nv_wr32(dev, 0x404460, 0x00000000);
	nv_wr32(dev, 0x404464, 0x00000000);
	nv_wr32(dev, 0x404468, 0x00ffffff);
	nv_wr32(dev, 0x40446c, 0x00000000);
	nv_wr32(dev, 0x404480, 0x00000001);
	nv_wr32(dev, 0x404498, 0x00000001);
}

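/* M2MF unit defaults (0x4046xx); note the list skips 0x4046ec. */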
static void
nvc0_grctx_generate_m2mf(struct drm_device *dev)
{
	nv_wr32(dev, 0x404604, 0x00000015);
	nv_wr32(dev, 0x404608, 0x00000000);
	nv_wr32(dev, 0x40460c, 0x00002e00);
	nv_wr32(dev, 0x404610, 0x00000100);
	nv_wr32(dev, 0x404618, 0x00000000);
	nv_wr32(dev, 0x40461c, 0x00000000);
	nv_wr32(dev, 0x404620, 0x00000000);
	nv_wr32(dev, 0x404624, 0x00000000);
	nv_wr32(dev, 0x404628, 0x00000000);
	nv_wr32(dev, 0x40462c, 0x00000000);
	nv_wr32(dev, 0x404630, 0x00000000);
	nv_wr32(dev, 0x404634, 0x00000000);
	nv_wr32(dev, 0x404638, 0x00000004);
	nv_wr32(dev, 0x40463c, 0x00000000);
	nv_wr32(dev, 0x404640, 0x00000000);
	nv_wr32(dev, 0x404644, 0x00000000);
	nv_wr32(dev, 0x404648, 0x00000000);
	nv_wr32(dev, 0x40464c, 0x00000000);
	nv_wr32(dev, 0x404650, 0x00000000);
	nv_wr32(dev, 0x404654, 0x00000000);
	nv_wr32(dev, 0x404658, 0x00000000);
	nv_wr32(dev, 0x40465c, 0x007f0100);
	nv_wr32(dev, 0x404660, 0x00000000);
	nv_wr32(dev, 0x404664, 0x00000000);
	nv_wr32(dev, 0x404668, 0x00000000);
	nv_wr32(dev, 0x40466c, 0x00000000);
	nv_wr32(dev, 0x404670, 0x00000000);
	nv_wr32(dev, 0x404674, 0x00000000);
	nv_wr32(dev, 0x404678, 0x00000000);
	nv_wr32(dev, 0x40467c, 0x00000002);
	nv_wr32(dev, 0x404680, 0x00000000);
	nv_wr32(dev, 0x404684, 0x00000000);
	nv_wr32(dev, 0x404688, 0x00000000);
	nv_wr32(dev, 0x40468c, 0x00000000);
	nv_wr32(dev, 0x404690, 0x00000000);
	nv_wr32(dev, 0x404694, 0x00000000);
	nv_wr32(dev, 0x404698, 0x00000000);
	nv_wr32(dev, 0x40469c, 0x00000000);
	nv_wr32(dev, 0x4046a0, 0x007f0080);
	nv_wr32(dev, 0x4046a4, 0x00000000);
	nv_wr32(dev, 0x4046a8, 0x00000000);
	nv_wr32(dev, 0x4046ac, 0x00000000);
	nv_wr32(dev, 0x4046b0, 0x00000000);
	nv_wr32(dev, 0x4046b4, 0x00000000);
	nv_wr32(dev, 0x4046b8, 0x00000000);
	nv_wr32(dev, 0x4046bc, 0x00000000);
	nv_wr32(dev, 0x4046c0, 0x00000000);
	nv_wr32(dev, 0x4046c4, 0x00000000);
	nv_wr32(dev, 0x4046c8, 0x00000000);
	nv_wr32(dev, 0x4046cc, 0x00000000);
	nv_wr32(dev, 0x4046d0, 0x00000000);
	nv_wr32(dev, 0x4046d4, 0x00000000);
	nv_wr32(dev, 0x4046d8, 0x00000000);
	nv_wr32(dev, 0x4046dc, 0x00000000);
	nv_wr32(dev, 0x4046e0, 0x00000000);
	nv_wr32(dev, 0x4046e4, 0x00000000);
	nv_wr32(dev, 0x4046e8, 0x00000000);
	nv_wr32(dev, 0x4046f0, 0x00000000);
	nv_wr32(dev, 0x4046f4, 0x00000000);
}

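/* Unidentified unit at 0x4047xx; everything defaults to zero except
 * 0x404734.
 */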
static void
nvc0_grctx_generate_unk47xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x404700, 0x00000000);
	nv_wr32(dev, 0x404704, 0x00000000);
	nv_wr32(dev, 0x404708, 0x00000000);
	nv_wr32(dev, 0x40470c, 0x00000000);
	nv_wr32(dev, 0x404710, 0x00000000);
	nv_wr32(dev, 0x404714, 0x00000000);
	nv_wr32(dev, 0x404718, 0x00000000);
	nv_wr32(dev, 0x40471c, 0x00000000);
	nv_wr32(dev, 0x404720, 0x00000000);
	nv_wr32(dev, 0x404724, 0x00000000);
	nv_wr32(dev, 0x404728, 0x00000000);
	nv_wr32(dev, 0x40472c, 0x00000000);
	nv_wr32(dev, 0x404730, 0x00000000);
	nv_wr32(dev, 0x404734, 0x00000100);
	nv_wr32(dev, 0x404738, 0x00000000);
	nv_wr32(dev, 0x40473c, 0x00000000);
	nv_wr32(dev, 0x404740, 0x00000000);
	nv_wr32(dev, 0x404744, 0x00000000);
	nv_wr32(dev, 0x404748, 0x00000000);
	nv_wr32(dev, 0x40474c, 0x00000000);
	nv_wr32(dev, 0x404750, 0x00000000);
	nv_wr32(dev, 0x404754, 0x00000000);
}

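/* Shader unit defaults (0x4058xx/0x405axx); the first three registers are
 * chipset-specific. 0x405870-0x40587c are rewritten later by
 * nvc0_grctx_generate().
 */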
static void
nvc0_grctx_generate_shaders(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->chipset == 0xd9) {
		nv_wr32(dev, 0x405800, 0x0f8000bf);
		nv_wr32(dev, 0x405830, 0x02180218);
		nv_wr32(dev, 0x405834, 0x08000000);
	} else
	if (dev_priv->chipset == 0xc1) {
		nv_wr32(dev, 0x405800, 0x0f8000bf);
		nv_wr32(dev, 0x405830, 0x02180218);
		nv_wr32(dev, 0x405834, 0x00000000);
	} else {
		nv_wr32(dev, 0x405800, 0x078000bf);
		nv_wr32(dev, 0x405830, 0x02180000);
		nv_wr32(dev, 0x405834, 0x00000000);
	}
	nv_wr32(dev, 0x405838, 0x00000000);
	nv_wr32(dev, 0x405854, 0x00000000);
	nv_wr32(dev, 0x405870, 0x00000001);
	nv_wr32(dev, 0x405874, 0x00000001);
	nv_wr32(dev, 0x405878, 0x00000001);
	nv_wr32(dev, 0x40587c, 0x00000001);
	nv_wr32(dev, 0x405a00, 0x00000000);
	nv_wr32(dev, 0x405a04, 0x00000000);
	nv_wr32(dev, 0x405a18, 0x00000000);
}

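/* Unidentified unit at 0x4060xx; nvc0_grctx_generate() below rewrites
 * 0x406028 with the TP-count bitfield and zeroes 0x40602c-0x406034.
 */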
static void
nvc0_grctx_generate_unk60xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x406020, 0x000103c1);
	nv_wr32(dev, 0x406028, 0x00000001);
	nv_wr32(dev, 0x40602c, 0x00000001);
	nv_wr32(dev, 0x406030, 0x00000001);
	nv_wr32(dev, 0x406034, 0x00000001);
}

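/* Unidentified unit at 0x4064xx; a few registers exist only on chipsets
 * 0xc1 and 0xd9.
 */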
static void
nvc0_grctx_generate_unk64xx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nv_wr32(dev, 0x4064a8, 0x00000000);
	nv_wr32(dev, 0x4064ac, 0x00003fff);
	nv_wr32(dev, 0x4064b4, 0x00000000);
	nv_wr32(dev, 0x4064b8, 0x00000000);
	if (dev_priv->chipset == 0xd9)
		nv_wr32(dev, 0x4064bc, 0x00000000);
	if (dev_priv->chipset == 0xc1 ||
	    dev_priv->chipset == 0xd9) {
		nv_wr32(dev, 0x4064c0, 0x80140078);
		nv_wr32(dev, 0x4064c4, 0x0086ffff);
	}
}

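/* TP bus defaults (0x4078xx); the same six-value pattern is repeated in
 * the per-GPC (0x418bxx) and per-TP (0x419bxx) spaces below.
 */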
static void
nvc0_grctx_generate_tpbus(struct drm_device *dev)
{
	nv_wr32(dev, 0x407804, 0x00000023);
	nv_wr32(dev, 0x40780c, 0x0a418820);
	nv_wr32(dev, 0x407810, 0x062080e6);
	nv_wr32(dev, 0x407814, 0x020398a4);
	nv_wr32(dev, 0x407818, 0x0e629062);
	nv_wr32(dev, 0x40781c, 0x0a418820);
	nv_wr32(dev, 0x407820, 0x000000e6);
	nv_wr32(dev, 0x4078bc, 0x00000103);
}

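/* "ccache" unit defaults (0x4080xx); the expansion of the name is not
 * documented here.
 */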
static void
nvc0_grctx_generate_ccache(struct drm_device *dev)
{
	nv_wr32(dev, 0x408000, 0x00000000);
	nv_wr32(dev, 0x408004, 0x00000000);
	nv_wr32(dev, 0x408008, 0x00000018);
	nv_wr32(dev, 0x40800c, 0x00000000);
	nv_wr32(dev, 0x408010, 0x00000000);
	nv_wr32(dev, 0x408014, 0x00000069);
	nv_wr32(dev, 0x408018, 0xe100e100);
	nv_wr32(dev, 0x408064, 0x00000000);
}

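/* ROP broadcast defaults; several values differ between 0xc1, 0xd9 and
 * the remaining chipsets.
 */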
static void
nvc0_grctx_generate_rop(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int chipset = dev_priv->chipset;

	/* ROPC_BROADCAST */
	nv_wr32(dev, 0x408800, 0x02802a3c);
	nv_wr32(dev, 0x408804, 0x00000040);
	if (chipset == 0xd9) {
		nv_wr32(dev, 0x408808, 0x1043e005);
		nv_wr32(dev, 0x408900, 0x3080b801);
		nv_wr32(dev, 0x408904, 0x1043e005);
		nv_wr32(dev, 0x408908, 0x00c8102f);
	} else
	if (chipset == 0xc1) {
		nv_wr32(dev, 0x408808, 0x1003e005);
		nv_wr32(dev, 0x408900, 0x3080b801);
		nv_wr32(dev, 0x408904, 0x62000001);
		nv_wr32(dev, 0x408908, 0x00c80929);
	} else {
		nv_wr32(dev, 0x408808, 0x0003e00d);
		nv_wr32(dev, 0x408900, 0x3080b801);
		nv_wr32(dev, 0x408904, 0x02000001);
		nv_wr32(dev, 0x408908, 0x00c80929);
	}
	nv_wr32(dev, 0x40890c, 0x00000000);
	nv_wr32(dev, 0x408980, 0x0000011d);
}

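/* GPC broadcast defaults (0x418xxx); chipset differences are selected
 * inline via ternaries and if/else chains.
 */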
static void
nvc0_grctx_generate_gpc(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int chipset = dev_priv->chipset;
	int i;

	/* GPC_BROADCAST */
	nv_wr32(dev, 0x418380, 0x00000016);
	nv_wr32(dev, 0x418400, 0x38004e00);
	nv_wr32(dev, 0x418404, 0x71e0ffff);
	nv_wr32(dev, 0x418408, 0x00000000);
	nv_wr32(dev, 0x41840c, 0x00001008);
	nv_wr32(dev, 0x418410, 0x0fff0fff);
	nv_wr32(dev, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
	nv_wr32(dev, 0x418450, 0x00000000);
	nv_wr32(dev, 0x418454, 0x00000000);
	nv_wr32(dev, 0x418458, 0x00000000);
	nv_wr32(dev, 0x41845c, 0x00000000);
	nv_wr32(dev, 0x418460, 0x00000000);
	nv_wr32(dev, 0x418464, 0x00000000);
	nv_wr32(dev, 0x418468, 0x00000001);
	nv_wr32(dev, 0x41846c, 0x00000000);
	nv_wr32(dev, 0x418470, 0x00000000);
	nv_wr32(dev, 0x418600, 0x0000001f);
	nv_wr32(dev, 0x418684, 0x0000000f);
	nv_wr32(dev, 0x418700, 0x00000002);
	nv_wr32(dev, 0x418704, 0x00000080);
	nv_wr32(dev, 0x418708, 0x00000000);
	nv_wr32(dev, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
	nv_wr32(dev, 0x418710, 0x00000000);
	nv_wr32(dev, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
	nv_wr32(dev, 0x418808, 0x00000000);
	nv_wr32(dev, 0x41880c, 0x00000000);
	nv_wr32(dev, 0x418810, 0x00000000);
	nv_wr32(dev, 0x418828, 0x00008442);
	if (chipset == 0xc1 || chipset == 0xd9)
		nv_wr32(dev, 0x418830, 0x10000001);
	else
		nv_wr32(dev, 0x418830, 0x00000001);
	nv_wr32(dev, 0x4188d8, 0x00000008);
	nv_wr32(dev, 0x4188e0, 0x01000000);
	nv_wr32(dev, 0x4188e8, 0x00000000);
	nv_wr32(dev, 0x4188ec, 0x00000000);
	nv_wr32(dev, 0x4188f0, 0x00000000);
	nv_wr32(dev, 0x4188f4, 0x00000000);
	nv_wr32(dev, 0x4188f8, 0x00000000);
	if (chipset == 0xd9)
		nv_wr32(dev, 0x4188fc, 0x20100008);
	else if (chipset == 0xc1)
		nv_wr32(dev, 0x4188fc, 0x00100018);
	else
		nv_wr32(dev, 0x4188fc, 0x00100000);
	nv_wr32(dev, 0x41891c, 0x00ff00ff);
	nv_wr32(dev, 0x418924, 0x00000000);
	nv_wr32(dev, 0x418928, 0x00ffff00);
	nv_wr32(dev, 0x41892c, 0x0000ff00);
	for (i = 0; i < 8; i++) {
		nv_wr32(dev, 0x418a00 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a04 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a08 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a0c + (i * 0x20), 0x00010000);
		nv_wr32(dev, 0x418a10 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
		nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
	}
	nv_wr32(dev, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
	nv_wr32(dev, 0x418b08, 0x0a418820);
	nv_wr32(dev, 0x418b0c, 0x062080e6);
	nv_wr32(dev, 0x418b10, 0x020398a4);
	nv_wr32(dev, 0x418b14, 0x0e629062);
	nv_wr32(dev, 0x418b18, 0x0a418820);
	nv_wr32(dev, 0x418b1c, 0x000000e6);
	nv_wr32(dev, 0x418bb8, 0x00000103);
	nv_wr32(dev, 0x418c08, 0x00000001);
	nv_wr32(dev, 0x418c10, 0x00000000);
	nv_wr32(dev, 0x418c14, 0x00000000);
	nv_wr32(dev, 0x418c18, 0x00000000);
	nv_wr32(dev, 0x418c1c, 0x00000000);
	nv_wr32(dev, 0x418c20, 0x00000000);
	nv_wr32(dev, 0x418c24, 0x00000000);
	nv_wr32(dev, 0x418c28, 0x00000000);
	nv_wr32(dev, 0x418c2c, 0x00000000);
	if (chipset == 0xc1 || chipset == 0xd9)
		nv_wr32(dev, 0x418c6c, 0x00000001);
	nv_wr32(dev, 0x418c80, 0x20200004);
	nv_wr32(dev, 0x418c8c, 0x00000001);
	nv_wr32(dev, 0x419000, 0x00000780);
	nv_wr32(dev, 0x419004, 0x00000000);
	nv_wr32(dev, 0x419008, 0x00000000);
	nv_wr32(dev, 0x419014, 0x00000004);
}

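/* TP (thread processor) broadcast defaults (0x419xxx), written through the
 * GPC broadcast space.
 */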
static void
nvc0_grctx_generate_tp(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int chipset = dev_priv->chipset;

	/* GPC_BROADCAST.TP_BROADCAST */
	nv_wr32(dev, 0x419818, 0x00000000);
	nv_wr32(dev, 0x41983c, 0x00038bc7);
	nv_wr32(dev, 0x419848, 0x00000000);
	if (chipset == 0xc1 || chipset == 0xd9)
		nv_wr32(dev, 0x419864, 0x00000129);
	else
		nv_wr32(dev, 0x419864, 0x0000012a);
	nv_wr32(dev, 0x419888, 0x00000000);
	nv_wr32(dev, 0x419a00, 0x000001f0);
	nv_wr32(dev, 0x419a04, 0x00000001);
	nv_wr32(dev, 0x419a08, 0x00000023);
	nv_wr32(dev, 0x419a0c, 0x00020000);
	nv_wr32(dev, 0x419a10, 0x00000000);
	nv_wr32(dev, 0x419a14, 0x00000200);
	nv_wr32(dev, 0x419a1c, 0x00000000);
	nv_wr32(dev, 0x419a20, 0x00000800);
	if (chipset == 0xd9)
		nv_wr32(dev, 0x00419ac4, 0x0017f440);
	else if (chipset != 0xc0 && chipset != 0xc8)
		nv_wr32(dev, 0x00419ac4, 0x0007f440);
	nv_wr32(dev, 0x419b00, 0x0a418820);
	nv_wr32(dev, 0x419b04, 0x062080e6);
	nv_wr32(dev, 0x419b08, 0x020398a4);
	nv_wr32(dev, 0x419b0c, 0x0e629062);
	nv_wr32(dev, 0x419b10, 0x0a418820);
	nv_wr32(dev, 0x419b14, 0x000000e6);
	nv_wr32(dev, 0x419bd0, 0x00900103);
	if (chipset == 0xc1 || chipset == 0xd9)
		nv_wr32(dev, 0x419be0, 0x00400001);
	else
		nv_wr32(dev, 0x419be0, 0x00000001);
	nv_wr32(dev, 0x419be4, 0x00000000);
	nv_wr32(dev, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
	nv_wr32(dev, 0x419c04, 0x00000006);
	nv_wr32(dev, 0x419c08, 0x00000002);
	nv_wr32(dev, 0x419c20, 0x00000000);
	if (dev_priv->chipset == 0xd9) {
		nv_wr32(dev, 0x419c24, 0x00084210);
		nv_wr32(dev, 0x419c28, 0x3cf3cf3c);
		nv_wr32(dev, 0x419cb0, 0x00020048);
	} else
	if (chipset == 0xce || chipset == 0xcf) {
		nv_wr32(dev, 0x419cb0, 0x00020048);
	} else {
		nv_wr32(dev, 0x419cb0, 0x00060048);
	}
	nv_wr32(dev, 0x419ce8, 0x00000000);
	nv_wr32(dev, 0x419cf4, 0x00000183);
	if (chipset == 0xc1 || chipset == 0xd9)
		nv_wr32(dev, 0x419d20, 0x12180000);
	else
		nv_wr32(dev, 0x419d20, 0x02180000);
	nv_wr32(dev, 0x419d24, 0x00001fff);
	if (chipset == 0xc1 || chipset == 0xd9)
		nv_wr32(dev, 0x419d44, 0x02180218);
	nv_wr32(dev, 0x419e04, 0x00000000);
	nv_wr32(dev, 0x419e08, 0x00000000);
	nv_wr32(dev, 0x419e0c, 0x00000000);
	nv_wr32(dev, 0x419e10, 0x00000002);
	nv_wr32(dev, 0x419e44, 0x001beff2);
	nv_wr32(dev, 0x419e48, 0x00000000);
	nv_wr32(dev, 0x419e4c, 0x0000000f);
	nv_wr32(dev, 0x419e50, 0x00000000);
	nv_wr32(dev, 0x419e54, 0x00000000);
	nv_wr32(dev, 0x419e58, 0x00000000);
	nv_wr32(dev, 0x419e5c, 0x00000000);
	nv_wr32(dev, 0x419e60, 0x00000000);
	nv_wr32(dev, 0x419e64, 0x00000000);
	nv_wr32(dev, 0x419e68, 0x00000000);
	nv_wr32(dev, 0x419e6c, 0x00000000);
	nv_wr32(dev, 0x419e70, 0x00000000);
	nv_wr32(dev, 0x419e74, 0x00000000);
	nv_wr32(dev, 0x419e78, 0x00000000);
	nv_wr32(dev, 0x419e7c, 0x00000000);
	nv_wr32(dev, 0x419e80, 0x00000000);
	nv_wr32(dev, 0x419e84, 0x00000000);
	nv_wr32(dev, 0x419e88, 0x00000000);
	nv_wr32(dev, 0x419e8c, 0x00000000);
	nv_wr32(dev, 0x419e90, 0x00000000);
	nv_wr32(dev, 0x419e98, 0x00000000);
	if (chipset != 0xc0 && chipset != 0xc8)
		nv_wr32(dev, 0x419ee0, 0x00011110);
	nv_wr32(dev, 0x419f50, 0x00000000);
	nv_wr32(dev, 0x419f54, 0x00000000);
	if (chipset != 0xc0 && chipset != 0xc8)
		nv_wr32(dev, 0x419f58, 0x00000000);
}

1788int
1789nvc0_grctx_generate(struct nouveau_channel *chan)
1790{
1791 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
1792 struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
1793 struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
1794 struct drm_device *dev = chan->dev;
1795 int i, gpc, tp, id;
1796 u32 fermi = nvc0_graph_class(dev);
1797 u32 r000260, tmp;
1798
1799 r000260 = nv_rd32(dev, 0x000260);
1800 nv_wr32(dev, 0x000260, r000260 & ~1);
1801 nv_wr32(dev, 0x400208, 0x00000000);
1802
1803 nvc0_grctx_generate_dispatch(dev);
1804 nvc0_grctx_generate_macro(dev);
1805 nvc0_grctx_generate_m2mf(dev);
1806 nvc0_grctx_generate_unk47xx(dev);
1807 nvc0_grctx_generate_shaders(dev);
1808 nvc0_grctx_generate_unk60xx(dev);
1809 nvc0_grctx_generate_unk64xx(dev);
1810 nvc0_grctx_generate_tpbus(dev);
1811 nvc0_grctx_generate_ccache(dev);
1812 nvc0_grctx_generate_rop(dev);
1813 nvc0_grctx_generate_gpc(dev);
1814 nvc0_grctx_generate_tp(dev);
1815
1816 nv_wr32(dev, 0x404154, 0x00000000);
1817
1818 /* fuc "mmio list" writes */
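	/* (each 8-byte entry in the channel's mmio list is a register/value
	 * pair; they are replayed here by hand)
	 */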
1819 for (i = 0; i < grch->mmio_nr * 8; i += 8) {
1820 u32 reg = nv_ro32(grch->mmio, i + 0);
1821 nv_wr32(dev, reg, nv_ro32(grch->mmio, i + 4));
1822 }
1823
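	/* assign each present TP a sequential global ID, interleaved
	 * across GPCs, and write each GPC's TP count alongside
	 */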
1824 for (tp = 0, id = 0; tp < 4; tp++) {
1825 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
1826 if (tp < priv->tp_nr[gpc]) {
1827 nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
1828 nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
1829 nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
1830 nv_wr32(dev, TP_UNIT(gpc, tp, 0x088), id);
1831 id++;
1832 }
1833
1834 nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tp_nr[gpc]);
1835 nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tp_nr[gpc]);
1836 }
1837 }
1838
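	/* pack the per-GPC TP counts into one word, a nibble per GPC */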
1839 tmp = 0;
1840 for (i = 0; i < priv->gpc_nr; i++)
1841 tmp |= priv->tp_nr[i] << (i * 4);
1842 nv_wr32(dev, 0x406028, tmp);
1843 nv_wr32(dev, 0x405870, tmp);
1844
1845 nv_wr32(dev, 0x40602c, 0x00000000);
1846 nv_wr32(dev, 0x405874, 0x00000000);
1847 nv_wr32(dev, 0x406030, 0x00000000);
1848 nv_wr32(dev, 0x405878, 0x00000000);
1849 nv_wr32(dev, 0x406034, 0x00000000);
1850 nv_wr32(dev, 0x40587c, 0x00000000);
1851
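	/* build a TP -> GPC map in round-robin order, one byte per TP,
	 * unused entries padded with 0x1f, uploaded as four 32-bit words
	 */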
1852 if (1) {
1853 u8 tpnr[GPC_MAX], data[TP_MAX];
1854
1855 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1856 memset(data, 0x1f, sizeof(data));
1857
1858 gpc = -1;
1859 for (tp = 0; tp < priv->tp_total; tp++) {
1860 do {
1861 gpc = (gpc + 1) % priv->gpc_nr;
1862 } while (!tpnr[gpc]);
1863 tpnr[gpc]--;
1864 data[tp] = gpc;
1865 }
1866
1867 for (i = 0; i < 4; i++)
1868 nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
1869 }
1870
1871 if (1) {
1872 u32 data[6] = {}, data2[2] = {};
1873 u8 tpnr[GPC_MAX];
1874 u8 shift, ntpcv;
1875
1876 /* calculate first set of magics */
1877 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1878
1879 gpc = -1;
1880 for (tp = 0; tp < priv->tp_total; tp++) {
1881 do {
1882 gpc = (gpc + 1) % priv->gpc_nr;
1883 } while (!tpnr[gpc]);
1884 tpnr[gpc]--;
1885
1886 data[tp / 6] |= gpc << ((tp % 6) * 5);
1887 }
1888
1889 for (; tp < 32; tp++)
1890 data[tp / 6] |= 7 << ((tp % 6) * 5);
1891
1892 /* and the second... */
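		/* (shift tp_total left until bit 4 is set, counting the
		 * shifts; e.g. tp_total == 14 (0b01110) gives ntpcv == 28,
		 * shift == 1)
		 */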
1893 shift = 0;
1894 ntpcv = priv->tp_total;
1895 while (!(ntpcv & (1 << 4))) {
1896 ntpcv <<= 1;
1897 shift++;
1898 }
1899
1900 data2[0] = (ntpcv << 16);
1901 data2[0] |= (shift << 21);
1902 data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
1903 for (i = 1; i < 7; i++)
1904 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
1905
1906 /* GPC_BROADCAST */
1907 nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) |
1908 priv->magic_not_rop_nr);
1909 for (i = 0; i < 6; i++)
1910 nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
1911
1912 /* GPC_BROADCAST.TP_BROADCAST */
1913 nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) |
1914 priv->magic_not_rop_nr |
1915 data2[0]);
1916 nv_wr32(dev, 0x419be4, data2[1]);
1917 for (i = 0; i < 6; i++)
1918 nv_wr32(dev, 0x419b00 + (i * 4), data[i]);
1919
1920 /* UNK78xx */
1921 nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) |
1922 priv->magic_not_rop_nr);
1923 for (i = 0; i < 6; i++)
1924 nv_wr32(dev, 0x40780c + (i * 4), data[i]);
1925 }
1926
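	/* spread the TPs that exist evenly across 32 mask slots: each
	 * slot adds at most one more TP bit (8 bits per GPC) to tp_set,
	 * and 0x406c00 gets the complement within the full TP mask
	 */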
1927 if (1) {
1928 u32 tp_mask = 0, tp_set = 0;
1929 u8 tpnr[GPC_MAX], a, b;
1930
1931 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1932 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
1933 tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
1934
1935 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
1936 a = (i * (priv->tp_total - 1)) / 32;
1937 if (a != b) {
1938 b = a;
1939 do {
1940 gpc = (gpc + 1) % priv->gpc_nr;
1941 } while (!tpnr[gpc]);
1942 tp = priv->tp_nr[gpc] - tpnr[gpc]--;
1943
1944 tp_set |= 1 << ((gpc * 8) + tp);
1945 }
1946
1947 nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
1948 nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask);
1949 }
1950 }
1951
1952 nv_wr32(dev, 0x400208, 0x80000000);
1953
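	/* default method state for the graphics objects, written through
	 * the ICMD interface enabled by the 0x400208 write above
	 */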
1954 nv_icmd(dev, 0x00001000, 0x00000004);
1955 nv_icmd(dev, 0x000000a9, 0x0000ffff);
1956 nv_icmd(dev, 0x00000038, 0x0fac6881);
1957 nv_icmd(dev, 0x0000003d, 0x00000001);
1958 nv_icmd(dev, 0x000000e8, 0x00000400);
1959 nv_icmd(dev, 0x000000e9, 0x00000400);
1960 nv_icmd(dev, 0x000000ea, 0x00000400);
1961 nv_icmd(dev, 0x000000eb, 0x00000400);
1962 nv_icmd(dev, 0x000000ec, 0x00000400);
1963 nv_icmd(dev, 0x000000ed, 0x00000400);
1964 nv_icmd(dev, 0x000000ee, 0x00000400);
1965 nv_icmd(dev, 0x000000ef, 0x00000400);
1966 nv_icmd(dev, 0x00000078, 0x00000300);
1967 nv_icmd(dev, 0x00000079, 0x00000300);
1968 nv_icmd(dev, 0x0000007a, 0x00000300);
1969 nv_icmd(dev, 0x0000007b, 0x00000300);
1970 nv_icmd(dev, 0x0000007c, 0x00000300);
1971 nv_icmd(dev, 0x0000007d, 0x00000300);
1972 nv_icmd(dev, 0x0000007e, 0x00000300);
1973 nv_icmd(dev, 0x0000007f, 0x00000300);
1974 nv_icmd(dev, 0x00000050, 0x00000011);
1975 nv_icmd(dev, 0x00000058, 0x00000008);
1976 nv_icmd(dev, 0x00000059, 0x00000008);
1977 nv_icmd(dev, 0x0000005a, 0x00000008);
1978 nv_icmd(dev, 0x0000005b, 0x00000008);
1979 nv_icmd(dev, 0x0000005c, 0x00000008);
1980 nv_icmd(dev, 0x0000005d, 0x00000008);
1981 nv_icmd(dev, 0x0000005e, 0x00000008);
1982 nv_icmd(dev, 0x0000005f, 0x00000008);
1983 nv_icmd(dev, 0x00000208, 0x00000001);
1984 nv_icmd(dev, 0x00000209, 0x00000001);
1985 nv_icmd(dev, 0x0000020a, 0x00000001);
1986 nv_icmd(dev, 0x0000020b, 0x00000001);
1987 nv_icmd(dev, 0x0000020c, 0x00000001);
1988 nv_icmd(dev, 0x0000020d, 0x00000001);
1989 nv_icmd(dev, 0x0000020e, 0x00000001);
1990 nv_icmd(dev, 0x0000020f, 0x00000001);
1991 nv_icmd(dev, 0x00000081, 0x00000001);
1992 nv_icmd(dev, 0x00000085, 0x00000004);
1993 nv_icmd(dev, 0x00000088, 0x00000400);
1994 nv_icmd(dev, 0x00000090, 0x00000300);
1995 nv_icmd(dev, 0x00000098, 0x00001001);
1996 nv_icmd(dev, 0x000000e3, 0x00000001);
1997 nv_icmd(dev, 0x000000da, 0x00000001);
1998 nv_icmd(dev, 0x000000f8, 0x00000003);
1999 nv_icmd(dev, 0x000000fa, 0x00000001);
2000 nv_icmd(dev, 0x0000009f, 0x0000ffff);
2001 nv_icmd(dev, 0x000000a0, 0x0000ffff);
2002 nv_icmd(dev, 0x000000a1, 0x0000ffff);
2003 nv_icmd(dev, 0x000000a2, 0x0000ffff);
2004 nv_icmd(dev, 0x000000b1, 0x00000001);
2005 nv_icmd(dev, 0x000000b2, 0x00000000);
2006 nv_icmd(dev, 0x000000b3, 0x00000000);
2007 nv_icmd(dev, 0x000000b4, 0x00000000);
2008 nv_icmd(dev, 0x000000b5, 0x00000000);
2009 nv_icmd(dev, 0x000000b6, 0x00000000);
2010 nv_icmd(dev, 0x000000b7, 0x00000000);
2011 nv_icmd(dev, 0x000000b8, 0x00000000);
2012 nv_icmd(dev, 0x000000b9, 0x00000000);
2013 nv_icmd(dev, 0x000000ba, 0x00000000);
2014 nv_icmd(dev, 0x000000bb, 0x00000000);
2015 nv_icmd(dev, 0x000000bc, 0x00000000);
2016 nv_icmd(dev, 0x000000bd, 0x00000000);
2017 nv_icmd(dev, 0x000000be, 0x00000000);
2018 nv_icmd(dev, 0x000000bf, 0x00000000);
2019 nv_icmd(dev, 0x000000c0, 0x00000000);
2020 nv_icmd(dev, 0x000000c1, 0x00000000);
2021 nv_icmd(dev, 0x000000c2, 0x00000000);
2022 nv_icmd(dev, 0x000000c3, 0x00000000);
2023 nv_icmd(dev, 0x000000c4, 0x00000000);
2024 nv_icmd(dev, 0x000000c5, 0x00000000);
2025 nv_icmd(dev, 0x000000c6, 0x00000000);
2026 nv_icmd(dev, 0x000000c7, 0x00000000);
2027 nv_icmd(dev, 0x000000c8, 0x00000000);
2028 nv_icmd(dev, 0x000000c9, 0x00000000);
2029 nv_icmd(dev, 0x000000ca, 0x00000000);
2030 nv_icmd(dev, 0x000000cb, 0x00000000);
2031 nv_icmd(dev, 0x000000cc, 0x00000000);
2032 nv_icmd(dev, 0x000000cd, 0x00000000);
2033 nv_icmd(dev, 0x000000ce, 0x00000000);
2034 nv_icmd(dev, 0x000000cf, 0x00000000);
2035 nv_icmd(dev, 0x000000d0, 0x00000000);
2036 nv_icmd(dev, 0x000000d1, 0x00000000);
2037 nv_icmd(dev, 0x000000d2, 0x00000000);
2038 nv_icmd(dev, 0x000000d3, 0x00000000);
2039 nv_icmd(dev, 0x000000d4, 0x00000000);
2040 nv_icmd(dev, 0x000000d5, 0x00000000);
2041 nv_icmd(dev, 0x000000d6, 0x00000000);
2042 nv_icmd(dev, 0x000000d7, 0x00000000);
2043 nv_icmd(dev, 0x000000d8, 0x00000000);
2044 nv_icmd(dev, 0x000000d9, 0x00000000);
2045 nv_icmd(dev, 0x00000210, 0x00000040);
2046 nv_icmd(dev, 0x00000211, 0x00000040);
2047 nv_icmd(dev, 0x00000212, 0x00000040);
2048 nv_icmd(dev, 0x00000213, 0x00000040);
2049 nv_icmd(dev, 0x00000214, 0x00000040);
2050 nv_icmd(dev, 0x00000215, 0x00000040);
2051 nv_icmd(dev, 0x00000216, 0x00000040);
2052 nv_icmd(dev, 0x00000217, 0x00000040);
2053 if (dev_priv->chipset == 0xd9) {
2054 for (i = 0x0400; i <= 0x0417; i++)
2055 nv_icmd(dev, i, 0x00000040);
2056 }
2057 nv_icmd(dev, 0x00000218, 0x0000c080);
2058 nv_icmd(dev, 0x00000219, 0x0000c080);
2059 nv_icmd(dev, 0x0000021a, 0x0000c080);
2060 nv_icmd(dev, 0x0000021b, 0x0000c080);
2061 nv_icmd(dev, 0x0000021c, 0x0000c080);
2062 nv_icmd(dev, 0x0000021d, 0x0000c080);
2063 nv_icmd(dev, 0x0000021e, 0x0000c080);
2064 nv_icmd(dev, 0x0000021f, 0x0000c080);
2065 if (dev_priv->chipset == 0xd9) {
2066 for (i = 0x0440; i <= 0x0457; i++)
2067 nv_icmd(dev, i, 0x0000c080);
2068 }
2069 nv_icmd(dev, 0x000000ad, 0x0000013e);
2070 nv_icmd(dev, 0x000000e1, 0x00000010);
2071 nv_icmd(dev, 0x00000290, 0x00000000);
2072 nv_icmd(dev, 0x00000291, 0x00000000);
2073 nv_icmd(dev, 0x00000292, 0x00000000);
2074 nv_icmd(dev, 0x00000293, 0x00000000);
2075 nv_icmd(dev, 0x00000294, 0x00000000);
2076 nv_icmd(dev, 0x00000295, 0x00000000);
2077 nv_icmd(dev, 0x00000296, 0x00000000);
2078 nv_icmd(dev, 0x00000297, 0x00000000);
2079 nv_icmd(dev, 0x00000298, 0x00000000);
2080 nv_icmd(dev, 0x00000299, 0x00000000);
2081 nv_icmd(dev, 0x0000029a, 0x00000000);
2082 nv_icmd(dev, 0x0000029b, 0x00000000);
2083 nv_icmd(dev, 0x0000029c, 0x00000000);
2084 nv_icmd(dev, 0x0000029d, 0x00000000);
2085 nv_icmd(dev, 0x0000029e, 0x00000000);
2086 nv_icmd(dev, 0x0000029f, 0x00000000);
2087 nv_icmd(dev, 0x000003b0, 0x00000000);
2088 nv_icmd(dev, 0x000003b1, 0x00000000);
2089 nv_icmd(dev, 0x000003b2, 0x00000000);
2090 nv_icmd(dev, 0x000003b3, 0x00000000);
2091 nv_icmd(dev, 0x000003b4, 0x00000000);
2092 nv_icmd(dev, 0x000003b5, 0x00000000);
2093 nv_icmd(dev, 0x000003b6, 0x00000000);
2094 nv_icmd(dev, 0x000003b7, 0x00000000);
2095 nv_icmd(dev, 0x000003b8, 0x00000000);
2096 nv_icmd(dev, 0x000003b9, 0x00000000);
2097 nv_icmd(dev, 0x000003ba, 0x00000000);
2098 nv_icmd(dev, 0x000003bb, 0x00000000);
2099 nv_icmd(dev, 0x000003bc, 0x00000000);
2100 nv_icmd(dev, 0x000003bd, 0x00000000);
2101 nv_icmd(dev, 0x000003be, 0x00000000);
2102 nv_icmd(dev, 0x000003bf, 0x00000000);
2103 nv_icmd(dev, 0x000002a0, 0x00000000);
2104 nv_icmd(dev, 0x000002a1, 0x00000000);
2105 nv_icmd(dev, 0x000002a2, 0x00000000);
2106 nv_icmd(dev, 0x000002a3, 0x00000000);
2107 nv_icmd(dev, 0x000002a4, 0x00000000);
2108 nv_icmd(dev, 0x000002a5, 0x00000000);
2109 nv_icmd(dev, 0x000002a6, 0x00000000);
2110 nv_icmd(dev, 0x000002a7, 0x00000000);
2111 nv_icmd(dev, 0x000002a8, 0x00000000);
2112 nv_icmd(dev, 0x000002a9, 0x00000000);
2113 nv_icmd(dev, 0x000002aa, 0x00000000);
2114 nv_icmd(dev, 0x000002ab, 0x00000000);
2115 nv_icmd(dev, 0x000002ac, 0x00000000);
2116 nv_icmd(dev, 0x000002ad, 0x00000000);
2117 nv_icmd(dev, 0x000002ae, 0x00000000);
2118 nv_icmd(dev, 0x000002af, 0x00000000);
2119 nv_icmd(dev, 0x00000420, 0x00000000);
2120 nv_icmd(dev, 0x00000421, 0x00000000);
2121 nv_icmd(dev, 0x00000422, 0x00000000);
2122 nv_icmd(dev, 0x00000423, 0x00000000);
2123 nv_icmd(dev, 0x00000424, 0x00000000);
2124 nv_icmd(dev, 0x00000425, 0x00000000);
2125 nv_icmd(dev, 0x00000426, 0x00000000);
2126 nv_icmd(dev, 0x00000427, 0x00000000);
2127 nv_icmd(dev, 0x00000428, 0x00000000);
2128 nv_icmd(dev, 0x00000429, 0x00000000);
2129 nv_icmd(dev, 0x0000042a, 0x00000000);
2130 nv_icmd(dev, 0x0000042b, 0x00000000);
2131 nv_icmd(dev, 0x0000042c, 0x00000000);
2132 nv_icmd(dev, 0x0000042d, 0x00000000);
2133 nv_icmd(dev, 0x0000042e, 0x00000000);
2134 nv_icmd(dev, 0x0000042f, 0x00000000);
2135 nv_icmd(dev, 0x000002b0, 0x00000000);
2136 nv_icmd(dev, 0x000002b1, 0x00000000);
2137 nv_icmd(dev, 0x000002b2, 0x00000000);
2138 nv_icmd(dev, 0x000002b3, 0x00000000);
2139 nv_icmd(dev, 0x000002b4, 0x00000000);
2140 nv_icmd(dev, 0x000002b5, 0x00000000);
2141 nv_icmd(dev, 0x000002b6, 0x00000000);
2142 nv_icmd(dev, 0x000002b7, 0x00000000);
2143 nv_icmd(dev, 0x000002b8, 0x00000000);
2144 nv_icmd(dev, 0x000002b9, 0x00000000);
2145 nv_icmd(dev, 0x000002ba, 0x00000000);
2146 nv_icmd(dev, 0x000002bb, 0x00000000);
2147 nv_icmd(dev, 0x000002bc, 0x00000000);
2148 nv_icmd(dev, 0x000002bd, 0x00000000);
2149 nv_icmd(dev, 0x000002be, 0x00000000);
2150 nv_icmd(dev, 0x000002bf, 0x00000000);
2151 nv_icmd(dev, 0x00000430, 0x00000000);
2152 nv_icmd(dev, 0x00000431, 0x00000000);
2153 nv_icmd(dev, 0x00000432, 0x00000000);
2154 nv_icmd(dev, 0x00000433, 0x00000000);
2155 nv_icmd(dev, 0x00000434, 0x00000000);
2156 nv_icmd(dev, 0x00000435, 0x00000000);
2157 nv_icmd(dev, 0x00000436, 0x00000000);
2158 nv_icmd(dev, 0x00000437, 0x00000000);
2159 nv_icmd(dev, 0x00000438, 0x00000000);
2160 nv_icmd(dev, 0x00000439, 0x00000000);
2161 nv_icmd(dev, 0x0000043a, 0x00000000);
2162 nv_icmd(dev, 0x0000043b, 0x00000000);
2163 nv_icmd(dev, 0x0000043c, 0x00000000);
2164 nv_icmd(dev, 0x0000043d, 0x00000000);
2165 nv_icmd(dev, 0x0000043e, 0x00000000);
2166 nv_icmd(dev, 0x0000043f, 0x00000000);
2167 nv_icmd(dev, 0x000002c0, 0x00000000);
2168 nv_icmd(dev, 0x000002c1, 0x00000000);
2169 nv_icmd(dev, 0x000002c2, 0x00000000);
2170 nv_icmd(dev, 0x000002c3, 0x00000000);
2171 nv_icmd(dev, 0x000002c4, 0x00000000);
2172 nv_icmd(dev, 0x000002c5, 0x00000000);
2173 nv_icmd(dev, 0x000002c6, 0x00000000);
2174 nv_icmd(dev, 0x000002c7, 0x00000000);
2175 nv_icmd(dev, 0x000002c8, 0x00000000);
2176 nv_icmd(dev, 0x000002c9, 0x00000000);
2177 nv_icmd(dev, 0x000002ca, 0x00000000);
2178 nv_icmd(dev, 0x000002cb, 0x00000000);
2179 nv_icmd(dev, 0x000002cc, 0x00000000);
2180 nv_icmd(dev, 0x000002cd, 0x00000000);
2181 nv_icmd(dev, 0x000002ce, 0x00000000);
2182 nv_icmd(dev, 0x000002cf, 0x00000000);
2183 nv_icmd(dev, 0x000004d0, 0x00000000);
2184 nv_icmd(dev, 0x000004d1, 0x00000000);
2185 nv_icmd(dev, 0x000004d2, 0x00000000);
2186 nv_icmd(dev, 0x000004d3, 0x00000000);
2187 nv_icmd(dev, 0x000004d4, 0x00000000);
2188 nv_icmd(dev, 0x000004d5, 0x00000000);
2189 nv_icmd(dev, 0x000004d6, 0x00000000);
2190 nv_icmd(dev, 0x000004d7, 0x00000000);
2191 nv_icmd(dev, 0x000004d8, 0x00000000);
2192 nv_icmd(dev, 0x000004d9, 0x00000000);
2193 nv_icmd(dev, 0x000004da, 0x00000000);
2194 nv_icmd(dev, 0x000004db, 0x00000000);
2195 nv_icmd(dev, 0x000004dc, 0x00000000);
2196 nv_icmd(dev, 0x000004dd, 0x00000000);
2197 nv_icmd(dev, 0x000004de, 0x00000000);
2198 nv_icmd(dev, 0x000004df, 0x00000000);
2199 nv_icmd(dev, 0x00000720, 0x00000000);
2200 nv_icmd(dev, 0x00000721, 0x00000000);
2201 nv_icmd(dev, 0x00000722, 0x00000000);
2202 nv_icmd(dev, 0x00000723, 0x00000000);
2203 nv_icmd(dev, 0x00000724, 0x00000000);
2204 nv_icmd(dev, 0x00000725, 0x00000000);
2205 nv_icmd(dev, 0x00000726, 0x00000000);
2206 nv_icmd(dev, 0x00000727, 0x00000000);
2207 nv_icmd(dev, 0x00000728, 0x00000000);
2208 nv_icmd(dev, 0x00000729, 0x00000000);
2209 nv_icmd(dev, 0x0000072a, 0x00000000);
2210 nv_icmd(dev, 0x0000072b, 0x00000000);
2211 nv_icmd(dev, 0x0000072c, 0x00000000);
2212 nv_icmd(dev, 0x0000072d, 0x00000000);
2213 nv_icmd(dev, 0x0000072e, 0x00000000);
2214 nv_icmd(dev, 0x0000072f, 0x00000000);
2215 nv_icmd(dev, 0x000008c0, 0x00000000);
2216 nv_icmd(dev, 0x000008c1, 0x00000000);
2217 nv_icmd(dev, 0x000008c2, 0x00000000);
2218 nv_icmd(dev, 0x000008c3, 0x00000000);
2219 nv_icmd(dev, 0x000008c4, 0x00000000);
2220 nv_icmd(dev, 0x000008c5, 0x00000000);
2221 nv_icmd(dev, 0x000008c6, 0x00000000);
2222 nv_icmd(dev, 0x000008c7, 0x00000000);
2223 nv_icmd(dev, 0x000008c8, 0x00000000);
2224 nv_icmd(dev, 0x000008c9, 0x00000000);
2225 nv_icmd(dev, 0x000008ca, 0x00000000);
2226 nv_icmd(dev, 0x000008cb, 0x00000000);
2227 nv_icmd(dev, 0x000008cc, 0x00000000);
2228 nv_icmd(dev, 0x000008cd, 0x00000000);
2229 nv_icmd(dev, 0x000008ce, 0x00000000);
2230 nv_icmd(dev, 0x000008cf, 0x00000000);
2231 nv_icmd(dev, 0x00000890, 0x00000000);
2232 nv_icmd(dev, 0x00000891, 0x00000000);
2233 nv_icmd(dev, 0x00000892, 0x00000000);
2234 nv_icmd(dev, 0x00000893, 0x00000000);
2235 nv_icmd(dev, 0x00000894, 0x00000000);
2236 nv_icmd(dev, 0x00000895, 0x00000000);
2237 nv_icmd(dev, 0x00000896, 0x00000000);
2238 nv_icmd(dev, 0x00000897, 0x00000000);
2239 nv_icmd(dev, 0x00000898, 0x00000000);
2240 nv_icmd(dev, 0x00000899, 0x00000000);
2241 nv_icmd(dev, 0x0000089a, 0x00000000);
2242 nv_icmd(dev, 0x0000089b, 0x00000000);
2243 nv_icmd(dev, 0x0000089c, 0x00000000);
2244 nv_icmd(dev, 0x0000089d, 0x00000000);
2245 nv_icmd(dev, 0x0000089e, 0x00000000);
2246 nv_icmd(dev, 0x0000089f, 0x00000000);
2247 nv_icmd(dev, 0x000008e0, 0x00000000);
2248 nv_icmd(dev, 0x000008e1, 0x00000000);
2249 nv_icmd(dev, 0x000008e2, 0x00000000);
2250 nv_icmd(dev, 0x000008e3, 0x00000000);
2251 nv_icmd(dev, 0x000008e4, 0x00000000);
2252 nv_icmd(dev, 0x000008e5, 0x00000000);
2253 nv_icmd(dev, 0x000008e6, 0x00000000);
2254 nv_icmd(dev, 0x000008e7, 0x00000000);
2255 nv_icmd(dev, 0x000008e8, 0x00000000);
2256 nv_icmd(dev, 0x000008e9, 0x00000000);
2257 nv_icmd(dev, 0x000008ea, 0x00000000);
2258 nv_icmd(dev, 0x000008eb, 0x00000000);
2259 nv_icmd(dev, 0x000008ec, 0x00000000);
2260 nv_icmd(dev, 0x000008ed, 0x00000000);
2261 nv_icmd(dev, 0x000008ee, 0x00000000);
2262 nv_icmd(dev, 0x000008ef, 0x00000000);
2263 nv_icmd(dev, 0x000008a0, 0x00000000);
2264 nv_icmd(dev, 0x000008a1, 0x00000000);
2265 nv_icmd(dev, 0x000008a2, 0x00000000);
2266 nv_icmd(dev, 0x000008a3, 0x00000000);
2267 nv_icmd(dev, 0x000008a4, 0x00000000);
2268 nv_icmd(dev, 0x000008a5, 0x00000000);
2269 nv_icmd(dev, 0x000008a6, 0x00000000);
2270 nv_icmd(dev, 0x000008a7, 0x00000000);
2271 nv_icmd(dev, 0x000008a8, 0x00000000);
2272 nv_icmd(dev, 0x000008a9, 0x00000000);
2273 nv_icmd(dev, 0x000008aa, 0x00000000);
2274 nv_icmd(dev, 0x000008ab, 0x00000000);
2275 nv_icmd(dev, 0x000008ac, 0x00000000);
2276 nv_icmd(dev, 0x000008ad, 0x00000000);
2277 nv_icmd(dev, 0x000008ae, 0x00000000);
2278 nv_icmd(dev, 0x000008af, 0x00000000);
2279 nv_icmd(dev, 0x000008f0, 0x00000000);
2280 nv_icmd(dev, 0x000008f1, 0x00000000);
2281 nv_icmd(dev, 0x000008f2, 0x00000000);
2282 nv_icmd(dev, 0x000008f3, 0x00000000);
2283 nv_icmd(dev, 0x000008f4, 0x00000000);
2284 nv_icmd(dev, 0x000008f5, 0x00000000);
2285 nv_icmd(dev, 0x000008f6, 0x00000000);
2286 nv_icmd(dev, 0x000008f7, 0x00000000);
2287 nv_icmd(dev, 0x000008f8, 0x00000000);
2288 nv_icmd(dev, 0x000008f9, 0x00000000);
2289 nv_icmd(dev, 0x000008fa, 0x00000000);
2290 nv_icmd(dev, 0x000008fb, 0x00000000);
2291 nv_icmd(dev, 0x000008fc, 0x00000000);
2292 nv_icmd(dev, 0x000008fd, 0x00000000);
2293 nv_icmd(dev, 0x000008fe, 0x00000000);
2294 nv_icmd(dev, 0x000008ff, 0x00000000);
2295 nv_icmd(dev, 0x0000094c, 0x000000ff);
2296 nv_icmd(dev, 0x0000094d, 0xffffffff);
2297 nv_icmd(dev, 0x0000094e, 0x00000002);
2298 nv_icmd(dev, 0x000002ec, 0x00000001);
2299 nv_icmd(dev, 0x00000303, 0x00000001);
2300 nv_icmd(dev, 0x000002e6, 0x00000001);
2301 nv_icmd(dev, 0x00000466, 0x00000052);
2302 nv_icmd(dev, 0x00000301, 0x3f800000);
2303 nv_icmd(dev, 0x00000304, 0x30201000);
2304 nv_icmd(dev, 0x00000305, 0x70605040);
2305 nv_icmd(dev, 0x00000306, 0xb8a89888);
2306 nv_icmd(dev, 0x00000307, 0xf8e8d8c8);
2307 nv_icmd(dev, 0x0000030a, 0x00ffff00);
2308 nv_icmd(dev, 0x0000030b, 0x0000001a);
2309 nv_icmd(dev, 0x0000030c, 0x00000001);
2310 nv_icmd(dev, 0x00000318, 0x00000001);
2311 nv_icmd(dev, 0x00000340, 0x00000000);
2312 nv_icmd(dev, 0x00000375, 0x00000001);
2313 nv_icmd(dev, 0x00000351, 0x00000100);
2314 nv_icmd(dev, 0x0000037d, 0x00000006);
2315 nv_icmd(dev, 0x000003a0, 0x00000002);
2316 nv_icmd(dev, 0x000003aa, 0x00000001);
2317 nv_icmd(dev, 0x000003a9, 0x00000001);
2318 nv_icmd(dev, 0x00000380, 0x00000001);
2319 nv_icmd(dev, 0x00000360, 0x00000040);
2320 nv_icmd(dev, 0x00000366, 0x00000000);
2321 nv_icmd(dev, 0x00000367, 0x00000000);
2322 nv_icmd(dev, 0x00000368, 0x00001fff);
2323 nv_icmd(dev, 0x00000370, 0x00000000);
2324 nv_icmd(dev, 0x00000371, 0x00000000);
2325 nv_icmd(dev, 0x00000372, 0x003fffff);
2326 nv_icmd(dev, 0x0000037a, 0x00000012);
2327 nv_icmd(dev, 0x000005e0, 0x00000022);
2328 nv_icmd(dev, 0x000005e1, 0x00000022);
2329 nv_icmd(dev, 0x000005e2, 0x00000022);
2330 nv_icmd(dev, 0x000005e3, 0x00000022);
2331 nv_icmd(dev, 0x000005e4, 0x00000022);
2332 nv_icmd(dev, 0x00000619, 0x00000003);
2333 nv_icmd(dev, 0x00000811, 0x00000003);
2334 nv_icmd(dev, 0x00000812, 0x00000004);
2335 nv_icmd(dev, 0x00000813, 0x00000006);
2336 nv_icmd(dev, 0x00000814, 0x00000008);
2337 nv_icmd(dev, 0x00000815, 0x0000000b);
2338 nv_icmd(dev, 0x00000800, 0x00000001);
2339 nv_icmd(dev, 0x00000801, 0x00000001);
2340 nv_icmd(dev, 0x00000802, 0x00000001);
2341 nv_icmd(dev, 0x00000803, 0x00000001);
2342 nv_icmd(dev, 0x00000804, 0x00000001);
2343 nv_icmd(dev, 0x00000805, 0x00000001);
2344 nv_icmd(dev, 0x00000632, 0x00000001);
2345 nv_icmd(dev, 0x00000633, 0x00000002);
2346 nv_icmd(dev, 0x00000634, 0x00000003);
2347 nv_icmd(dev, 0x00000635, 0x00000004);
2348 nv_icmd(dev, 0x00000654, 0x3f800000);
2349 nv_icmd(dev, 0x00000657, 0x3f800000);
2350 nv_icmd(dev, 0x00000655, 0x3f800000);
2351 nv_icmd(dev, 0x00000656, 0x3f800000);
2352 nv_icmd(dev, 0x000006cd, 0x3f800000);
2353 nv_icmd(dev, 0x000007f5, 0x3f800000);
2354 nv_icmd(dev, 0x000007dc, 0x39291909);
2355 nv_icmd(dev, 0x000007dd, 0x79695949);
2356 nv_icmd(dev, 0x000007de, 0xb9a99989);
2357 nv_icmd(dev, 0x000007df, 0xf9e9d9c9);
2358 nv_icmd(dev, 0x000007e8, 0x00003210);
2359 nv_icmd(dev, 0x000007e9, 0x00007654);
2360 nv_icmd(dev, 0x000007ea, 0x00000098);
2361 nv_icmd(dev, 0x000007ec, 0x39291909);
2362 nv_icmd(dev, 0x000007ed, 0x79695949);
2363 nv_icmd(dev, 0x000007ee, 0xb9a99989);
2364 nv_icmd(dev, 0x000007ef, 0xf9e9d9c9);
2365 nv_icmd(dev, 0x000007f0, 0x00003210);
2366 nv_icmd(dev, 0x000007f1, 0x00007654);
2367 nv_icmd(dev, 0x000007f2, 0x00000098);
2368 nv_icmd(dev, 0x000005a5, 0x00000001);
2369 nv_icmd(dev, 0x00000980, 0x00000000);
2370 nv_icmd(dev, 0x00000981, 0x00000000);
2371 nv_icmd(dev, 0x00000982, 0x00000000);
2372 nv_icmd(dev, 0x00000983, 0x00000000);
2373 nv_icmd(dev, 0x00000984, 0x00000000);
2374 nv_icmd(dev, 0x00000985, 0x00000000);
2375 nv_icmd(dev, 0x00000986, 0x00000000);
2376 nv_icmd(dev, 0x00000987, 0x00000000);
2377 nv_icmd(dev, 0x00000988, 0x00000000);
2378 nv_icmd(dev, 0x00000989, 0x00000000);
2379 nv_icmd(dev, 0x0000098a, 0x00000000);
2380 nv_icmd(dev, 0x0000098b, 0x00000000);
2381 nv_icmd(dev, 0x0000098c, 0x00000000);
2382 nv_icmd(dev, 0x0000098d, 0x00000000);
2383 nv_icmd(dev, 0x0000098e, 0x00000000);
2384 nv_icmd(dev, 0x0000098f, 0x00000000);
2385 nv_icmd(dev, 0x00000990, 0x00000000);
2386 nv_icmd(dev, 0x00000991, 0x00000000);
2387 nv_icmd(dev, 0x00000992, 0x00000000);
2388 nv_icmd(dev, 0x00000993, 0x00000000);
2389 nv_icmd(dev, 0x00000994, 0x00000000);
2390 nv_icmd(dev, 0x00000995, 0x00000000);
2391 nv_icmd(dev, 0x00000996, 0x00000000);
2392 nv_icmd(dev, 0x00000997, 0x00000000);
2393 nv_icmd(dev, 0x00000998, 0x00000000);
2394 nv_icmd(dev, 0x00000999, 0x00000000);
2395 nv_icmd(dev, 0x0000099a, 0x00000000);
2396 nv_icmd(dev, 0x0000099b, 0x00000000);
2397 nv_icmd(dev, 0x0000099c, 0x00000000);
2398 nv_icmd(dev, 0x0000099d, 0x00000000);
2399 nv_icmd(dev, 0x0000099e, 0x00000000);
2400 nv_icmd(dev, 0x0000099f, 0x00000000);
2401 nv_icmd(dev, 0x000009a0, 0x00000000);
2402 nv_icmd(dev, 0x000009a1, 0x00000000);
2403 nv_icmd(dev, 0x000009a2, 0x00000000);
2404 nv_icmd(dev, 0x000009a3, 0x00000000);
2405 nv_icmd(dev, 0x000009a4, 0x00000000);
2406 nv_icmd(dev, 0x000009a5, 0x00000000);
2407 nv_icmd(dev, 0x000009a6, 0x00000000);
2408 nv_icmd(dev, 0x000009a7, 0x00000000);
2409 nv_icmd(dev, 0x000009a8, 0x00000000);
2410 nv_icmd(dev, 0x000009a9, 0x00000000);
2411 nv_icmd(dev, 0x000009aa, 0x00000000);
2412 nv_icmd(dev, 0x000009ab, 0x00000000);
2413 nv_icmd(dev, 0x000009ac, 0x00000000);
2414 nv_icmd(dev, 0x000009ad, 0x00000000);
2415 nv_icmd(dev, 0x000009ae, 0x00000000);
2416 nv_icmd(dev, 0x000009af, 0x00000000);
2417 nv_icmd(dev, 0x000009b0, 0x00000000);
2418 nv_icmd(dev, 0x000009b1, 0x00000000);
2419 nv_icmd(dev, 0x000009b2, 0x00000000);
2420 nv_icmd(dev, 0x000009b3, 0x00000000);
2421 nv_icmd(dev, 0x000009b4, 0x00000000);
2422 nv_icmd(dev, 0x000009b5, 0x00000000);
2423 nv_icmd(dev, 0x000009b6, 0x00000000);
2424 nv_icmd(dev, 0x000009b7, 0x00000000);
2425 nv_icmd(dev, 0x000009b8, 0x00000000);
2426 nv_icmd(dev, 0x000009b9, 0x00000000);
2427 nv_icmd(dev, 0x000009ba, 0x00000000);
2428 nv_icmd(dev, 0x000009bb, 0x00000000);
2429 nv_icmd(dev, 0x000009bc, 0x00000000);
2430 nv_icmd(dev, 0x000009bd, 0x00000000);
2431 nv_icmd(dev, 0x000009be, 0x00000000);
2432 nv_icmd(dev, 0x000009bf, 0x00000000);
2433 nv_icmd(dev, 0x000009c0, 0x00000000);
2434 nv_icmd(dev, 0x000009c1, 0x00000000);
2435 nv_icmd(dev, 0x000009c2, 0x00000000);
2436 nv_icmd(dev, 0x000009c3, 0x00000000);
2437 nv_icmd(dev, 0x000009c4, 0x00000000);
2438 nv_icmd(dev, 0x000009c5, 0x00000000);
2439 nv_icmd(dev, 0x000009c6, 0x00000000);
2440 nv_icmd(dev, 0x000009c7, 0x00000000);
2441 nv_icmd(dev, 0x000009c8, 0x00000000);
2442 nv_icmd(dev, 0x000009c9, 0x00000000);
2443 nv_icmd(dev, 0x000009ca, 0x00000000);
2444 nv_icmd(dev, 0x000009cb, 0x00000000);
2445 nv_icmd(dev, 0x000009cc, 0x00000000);
2446 nv_icmd(dev, 0x000009cd, 0x00000000);
2447 nv_icmd(dev, 0x000009ce, 0x00000000);
2448 nv_icmd(dev, 0x000009cf, 0x00000000);
2449 nv_icmd(dev, 0x000009d0, 0x00000000);
2450 nv_icmd(dev, 0x000009d1, 0x00000000);
2451 nv_icmd(dev, 0x000009d2, 0x00000000);
2452 nv_icmd(dev, 0x000009d3, 0x00000000);
2453 nv_icmd(dev, 0x000009d4, 0x00000000);
2454 nv_icmd(dev, 0x000009d5, 0x00000000);
2455 nv_icmd(dev, 0x000009d6, 0x00000000);
2456 nv_icmd(dev, 0x000009d7, 0x00000000);
2457 nv_icmd(dev, 0x000009d8, 0x00000000);
2458 nv_icmd(dev, 0x000009d9, 0x00000000);
2459 nv_icmd(dev, 0x000009da, 0x00000000);
2460 nv_icmd(dev, 0x000009db, 0x00000000);
2461 nv_icmd(dev, 0x000009dc, 0x00000000);
2462 nv_icmd(dev, 0x000009dd, 0x00000000);
2463 nv_icmd(dev, 0x000009de, 0x00000000);
2464 nv_icmd(dev, 0x000009df, 0x00000000);
2465 nv_icmd(dev, 0x000009e0, 0x00000000);
2466 nv_icmd(dev, 0x000009e1, 0x00000000);
2467 nv_icmd(dev, 0x000009e2, 0x00000000);
2468 nv_icmd(dev, 0x000009e3, 0x00000000);
2469 nv_icmd(dev, 0x000009e4, 0x00000000);
2470 nv_icmd(dev, 0x000009e5, 0x00000000);
2471 nv_icmd(dev, 0x000009e6, 0x00000000);
2472 nv_icmd(dev, 0x000009e7, 0x00000000);
2473 nv_icmd(dev, 0x000009e8, 0x00000000);
2474 nv_icmd(dev, 0x000009e9, 0x00000000);
2475 nv_icmd(dev, 0x000009ea, 0x00000000);
2476 nv_icmd(dev, 0x000009eb, 0x00000000);
2477 nv_icmd(dev, 0x000009ec, 0x00000000);
2478 nv_icmd(dev, 0x000009ed, 0x00000000);
2479 nv_icmd(dev, 0x000009ee, 0x00000000);
2480 nv_icmd(dev, 0x000009ef, 0x00000000);
2481 nv_icmd(dev, 0x000009f0, 0x00000000);
2482 nv_icmd(dev, 0x000009f1, 0x00000000);
2483 nv_icmd(dev, 0x000009f2, 0x00000000);
2484 nv_icmd(dev, 0x000009f3, 0x00000000);
2485 nv_icmd(dev, 0x000009f4, 0x00000000);
2486 nv_icmd(dev, 0x000009f5, 0x00000000);
2487 nv_icmd(dev, 0x000009f6, 0x00000000);
2488 nv_icmd(dev, 0x000009f7, 0x00000000);
2489 nv_icmd(dev, 0x000009f8, 0x00000000);
2490 nv_icmd(dev, 0x000009f9, 0x00000000);
2491 nv_icmd(dev, 0x000009fa, 0x00000000);
2492 nv_icmd(dev, 0x000009fb, 0x00000000);
2493 nv_icmd(dev, 0x000009fc, 0x00000000);
2494 nv_icmd(dev, 0x000009fd, 0x00000000);
2495 nv_icmd(dev, 0x000009fe, 0x00000000);
2496 nv_icmd(dev, 0x000009ff, 0x00000000);
2497 nv_icmd(dev, 0x00000468, 0x00000004);
2498 nv_icmd(dev, 0x0000046c, 0x00000001);
2499 nv_icmd(dev, 0x00000470, 0x00000000);
2500 nv_icmd(dev, 0x00000471, 0x00000000);
2501 nv_icmd(dev, 0x00000472, 0x00000000);
2502 nv_icmd(dev, 0x00000473, 0x00000000);
2503 nv_icmd(dev, 0x00000474, 0x00000000);
2504 nv_icmd(dev, 0x00000475, 0x00000000);
2505 nv_icmd(dev, 0x00000476, 0x00000000);
2506 nv_icmd(dev, 0x00000477, 0x00000000);
2507 nv_icmd(dev, 0x00000478, 0x00000000);
2508 nv_icmd(dev, 0x00000479, 0x00000000);
2509 nv_icmd(dev, 0x0000047a, 0x00000000);
2510 nv_icmd(dev, 0x0000047b, 0x00000000);
2511 nv_icmd(dev, 0x0000047c, 0x00000000);
2512 nv_icmd(dev, 0x0000047d, 0x00000000);
2513 nv_icmd(dev, 0x0000047e, 0x00000000);
2514 nv_icmd(dev, 0x0000047f, 0x00000000);
2515 nv_icmd(dev, 0x00000480, 0x00000000);
2516 nv_icmd(dev, 0x00000481, 0x00000000);
2517 nv_icmd(dev, 0x00000482, 0x00000000);
2518 nv_icmd(dev, 0x00000483, 0x00000000);
2519 nv_icmd(dev, 0x00000484, 0x00000000);
2520 nv_icmd(dev, 0x00000485, 0x00000000);
2521 nv_icmd(dev, 0x00000486, 0x00000000);
2522 nv_icmd(dev, 0x00000487, 0x00000000);
2523 nv_icmd(dev, 0x00000488, 0x00000000);
2524 nv_icmd(dev, 0x00000489, 0x00000000);
2525 nv_icmd(dev, 0x0000048a, 0x00000000);
2526 nv_icmd(dev, 0x0000048b, 0x00000000);
2527 nv_icmd(dev, 0x0000048c, 0x00000000);
2528 nv_icmd(dev, 0x0000048d, 0x00000000);
2529 nv_icmd(dev, 0x0000048e, 0x00000000);
2530 nv_icmd(dev, 0x0000048f, 0x00000000);
2531 nv_icmd(dev, 0x00000490, 0x00000000);
2532 nv_icmd(dev, 0x00000491, 0x00000000);
2533 nv_icmd(dev, 0x00000492, 0x00000000);
2534 nv_icmd(dev, 0x00000493, 0x00000000);
2535 nv_icmd(dev, 0x00000494, 0x00000000);
2536 nv_icmd(dev, 0x00000495, 0x00000000);
2537 nv_icmd(dev, 0x00000496, 0x00000000);
2538 nv_icmd(dev, 0x00000497, 0x00000000);
2539 nv_icmd(dev, 0x00000498, 0x00000000);
2540 nv_icmd(dev, 0x00000499, 0x00000000);
2541 nv_icmd(dev, 0x0000049a, 0x00000000);
2542 nv_icmd(dev, 0x0000049b, 0x00000000);
2543 nv_icmd(dev, 0x0000049c, 0x00000000);
2544 nv_icmd(dev, 0x0000049d, 0x00000000);
2545 nv_icmd(dev, 0x0000049e, 0x00000000);
2546 nv_icmd(dev, 0x0000049f, 0x00000000);
2547 nv_icmd(dev, 0x000004a0, 0x00000000);
2548 nv_icmd(dev, 0x000004a1, 0x00000000);
2549 nv_icmd(dev, 0x000004a2, 0x00000000);
2550 nv_icmd(dev, 0x000004a3, 0x00000000);
2551 nv_icmd(dev, 0x000004a4, 0x00000000);
2552 nv_icmd(dev, 0x000004a5, 0x00000000);
2553 nv_icmd(dev, 0x000004a6, 0x00000000);
2554 nv_icmd(dev, 0x000004a7, 0x00000000);
2555 nv_icmd(dev, 0x000004a8, 0x00000000);
2556 nv_icmd(dev, 0x000004a9, 0x00000000);
2557 nv_icmd(dev, 0x000004aa, 0x00000000);
2558 nv_icmd(dev, 0x000004ab, 0x00000000);
2559 nv_icmd(dev, 0x000004ac, 0x00000000);
2560 nv_icmd(dev, 0x000004ad, 0x00000000);
2561 nv_icmd(dev, 0x000004ae, 0x00000000);
2562 nv_icmd(dev, 0x000004af, 0x00000000);
2563 nv_icmd(dev, 0x000004b0, 0x00000000);
2564 nv_icmd(dev, 0x000004b1, 0x00000000);
2565 nv_icmd(dev, 0x000004b2, 0x00000000);
2566 nv_icmd(dev, 0x000004b3, 0x00000000);
2567 nv_icmd(dev, 0x000004b4, 0x00000000);
2568 nv_icmd(dev, 0x000004b5, 0x00000000);
2569 nv_icmd(dev, 0x000004b6, 0x00000000);
2570 nv_icmd(dev, 0x000004b7, 0x00000000);
2571 nv_icmd(dev, 0x000004b8, 0x00000000);
2572 nv_icmd(dev, 0x000004b9, 0x00000000);
2573 nv_icmd(dev, 0x000004ba, 0x00000000);
2574 nv_icmd(dev, 0x000004bb, 0x00000000);
2575 nv_icmd(dev, 0x000004bc, 0x00000000);
2576 nv_icmd(dev, 0x000004bd, 0x00000000);
2577 nv_icmd(dev, 0x000004be, 0x00000000);
2578 nv_icmd(dev, 0x000004bf, 0x00000000);
2579 nv_icmd(dev, 0x000004c0, 0x00000000);
2580 nv_icmd(dev, 0x000004c1, 0x00000000);
2581 nv_icmd(dev, 0x000004c2, 0x00000000);
2582 nv_icmd(dev, 0x000004c3, 0x00000000);
2583 nv_icmd(dev, 0x000004c4, 0x00000000);
2584 nv_icmd(dev, 0x000004c5, 0x00000000);
2585 nv_icmd(dev, 0x000004c6, 0x00000000);
2586 nv_icmd(dev, 0x000004c7, 0x00000000);
2587 nv_icmd(dev, 0x000004c8, 0x00000000);
2588 nv_icmd(dev, 0x000004c9, 0x00000000);
2589 nv_icmd(dev, 0x000004ca, 0x00000000);
2590 nv_icmd(dev, 0x000004cb, 0x00000000);
2591 nv_icmd(dev, 0x000004cc, 0x00000000);
2592 nv_icmd(dev, 0x000004cd, 0x00000000);
2593 nv_icmd(dev, 0x000004ce, 0x00000000);
2594 nv_icmd(dev, 0x000004cf, 0x00000000);
2595 nv_icmd(dev, 0x00000510, 0x3f800000);
2596 nv_icmd(dev, 0x00000511, 0x3f800000);
2597 nv_icmd(dev, 0x00000512, 0x3f800000);
2598 nv_icmd(dev, 0x00000513, 0x3f800000);
2599 nv_icmd(dev, 0x00000514, 0x3f800000);
2600 nv_icmd(dev, 0x00000515, 0x3f800000);
2601 nv_icmd(dev, 0x00000516, 0x3f800000);
2602 nv_icmd(dev, 0x00000517, 0x3f800000);
2603 nv_icmd(dev, 0x00000518, 0x3f800000);
2604 nv_icmd(dev, 0x00000519, 0x3f800000);
2605 nv_icmd(dev, 0x0000051a, 0x3f800000);
2606 nv_icmd(dev, 0x0000051b, 0x3f800000);
2607 nv_icmd(dev, 0x0000051c, 0x3f800000);
2608 nv_icmd(dev, 0x0000051d, 0x3f800000);
2609 nv_icmd(dev, 0x0000051e, 0x3f800000);
2610 nv_icmd(dev, 0x0000051f, 0x3f800000);
2611 nv_icmd(dev, 0x00000520, 0x000002b6);
2612 nv_icmd(dev, 0x00000529, 0x00000001);
2613 nv_icmd(dev, 0x00000530, 0xffff0000);
2614 nv_icmd(dev, 0x00000531, 0xffff0000);
2615 nv_icmd(dev, 0x00000532, 0xffff0000);
2616 nv_icmd(dev, 0x00000533, 0xffff0000);
2617 nv_icmd(dev, 0x00000534, 0xffff0000);
2618 nv_icmd(dev, 0x00000535, 0xffff0000);
2619 nv_icmd(dev, 0x00000536, 0xffff0000);
2620 nv_icmd(dev, 0x00000537, 0xffff0000);
2621 nv_icmd(dev, 0x00000538, 0xffff0000);
2622 nv_icmd(dev, 0x00000539, 0xffff0000);
2623 nv_icmd(dev, 0x0000053a, 0xffff0000);
2624 nv_icmd(dev, 0x0000053b, 0xffff0000);
2625 nv_icmd(dev, 0x0000053c, 0xffff0000);
2626 nv_icmd(dev, 0x0000053d, 0xffff0000);
2627 nv_icmd(dev, 0x0000053e, 0xffff0000);
2628 nv_icmd(dev, 0x0000053f, 0xffff0000);
2629 nv_icmd(dev, 0x00000585, 0x0000003f);
2630 nv_icmd(dev, 0x00000576, 0x00000003);
2631 if (dev_priv->chipset == 0xc1 ||
2632 dev_priv->chipset == 0xd9)
2633 nv_icmd(dev, 0x0000057b, 0x00000059);
2634 nv_icmd(dev, 0x00000586, 0x00000040);
2635 nv_icmd(dev, 0x00000582, 0x00000080);
2636 nv_icmd(dev, 0x00000583, 0x00000080);
2637 nv_icmd(dev, 0x000005c2, 0x00000001);
2638 nv_icmd(dev, 0x00000638, 0x00000001);
2639 nv_icmd(dev, 0x00000639, 0x00000001);
2640 nv_icmd(dev, 0x0000063a, 0x00000002);
2641 nv_icmd(dev, 0x0000063b, 0x00000001);
2642 nv_icmd(dev, 0x0000063c, 0x00000001);
2643 nv_icmd(dev, 0x0000063d, 0x00000002);
2644 nv_icmd(dev, 0x0000063e, 0x00000001);
2645 nv_icmd(dev, 0x000008b8, 0x00000001);
2646 nv_icmd(dev, 0x000008b9, 0x00000001);
2647 nv_icmd(dev, 0x000008ba, 0x00000001);
2648 nv_icmd(dev, 0x000008bb, 0x00000001);
2649 nv_icmd(dev, 0x000008bc, 0x00000001);
2650 nv_icmd(dev, 0x000008bd, 0x00000001);
2651 nv_icmd(dev, 0x000008be, 0x00000001);
2652 nv_icmd(dev, 0x000008bf, 0x00000001);
2653 nv_icmd(dev, 0x00000900, 0x00000001);
2654 nv_icmd(dev, 0x00000901, 0x00000001);
2655 nv_icmd(dev, 0x00000902, 0x00000001);
2656 nv_icmd(dev, 0x00000903, 0x00000001);
2657 nv_icmd(dev, 0x00000904, 0x00000001);
2658 nv_icmd(dev, 0x00000905, 0x00000001);
2659 nv_icmd(dev, 0x00000906, 0x00000001);
2660 nv_icmd(dev, 0x00000907, 0x00000001);
2661 nv_icmd(dev, 0x00000908, 0x00000002);
2662 nv_icmd(dev, 0x00000909, 0x00000002);
2663 nv_icmd(dev, 0x0000090a, 0x00000002);
2664 nv_icmd(dev, 0x0000090b, 0x00000002);
2665 nv_icmd(dev, 0x0000090c, 0x00000002);
2666 nv_icmd(dev, 0x0000090d, 0x00000002);
2667 nv_icmd(dev, 0x0000090e, 0x00000002);
2668 nv_icmd(dev, 0x0000090f, 0x00000002);
2669 nv_icmd(dev, 0x00000910, 0x00000001);
2670 nv_icmd(dev, 0x00000911, 0x00000001);
2671 nv_icmd(dev, 0x00000912, 0x00000001);
2672 nv_icmd(dev, 0x00000913, 0x00000001);
2673 nv_icmd(dev, 0x00000914, 0x00000001);
2674 nv_icmd(dev, 0x00000915, 0x00000001);
2675 nv_icmd(dev, 0x00000916, 0x00000001);
2676 nv_icmd(dev, 0x00000917, 0x00000001);
2677 nv_icmd(dev, 0x00000918, 0x00000001);
2678 nv_icmd(dev, 0x00000919, 0x00000001);
2679 nv_icmd(dev, 0x0000091a, 0x00000001);
2680 nv_icmd(dev, 0x0000091b, 0x00000001);
2681 nv_icmd(dev, 0x0000091c, 0x00000001);
2682 nv_icmd(dev, 0x0000091d, 0x00000001);
2683 nv_icmd(dev, 0x0000091e, 0x00000001);
2684 nv_icmd(dev, 0x0000091f, 0x00000001);
2685 nv_icmd(dev, 0x00000920, 0x00000002);
2686 nv_icmd(dev, 0x00000921, 0x00000002);
2687 nv_icmd(dev, 0x00000922, 0x00000002);
2688 nv_icmd(dev, 0x00000923, 0x00000002);
2689 nv_icmd(dev, 0x00000924, 0x00000002);
2690 nv_icmd(dev, 0x00000925, 0x00000002);
2691 nv_icmd(dev, 0x00000926, 0x00000002);
2692 nv_icmd(dev, 0x00000927, 0x00000002);
2693 nv_icmd(dev, 0x00000928, 0x00000001);
2694 nv_icmd(dev, 0x00000929, 0x00000001);
2695 nv_icmd(dev, 0x0000092a, 0x00000001);
2696 nv_icmd(dev, 0x0000092b, 0x00000001);
2697 nv_icmd(dev, 0x0000092c, 0x00000001);
2698 nv_icmd(dev, 0x0000092d, 0x00000001);
2699 nv_icmd(dev, 0x0000092e, 0x00000001);
2700 nv_icmd(dev, 0x0000092f, 0x00000001);
2701 nv_icmd(dev, 0x00000648, 0x00000001);
2702 nv_icmd(dev, 0x00000649, 0x00000001);
2703 nv_icmd(dev, 0x0000064a, 0x00000001);
2704 nv_icmd(dev, 0x0000064b, 0x00000001);
2705 nv_icmd(dev, 0x0000064c, 0x00000001);
2706 nv_icmd(dev, 0x0000064d, 0x00000001);
2707 nv_icmd(dev, 0x0000064e, 0x00000001);
2708 nv_icmd(dev, 0x0000064f, 0x00000001);
2709 nv_icmd(dev, 0x00000650, 0x00000001);
2710 nv_icmd(dev, 0x00000658, 0x0000000f);
2711 nv_icmd(dev, 0x000007ff, 0x0000000a);
2712 nv_icmd(dev, 0x0000066a, 0x40000000);
2713 nv_icmd(dev, 0x0000066b, 0x10000000);
2714 nv_icmd(dev, 0x0000066c, 0xffff0000);
2715 nv_icmd(dev, 0x0000066d, 0xffff0000);
2716 nv_icmd(dev, 0x000007af, 0x00000008);
2717 nv_icmd(dev, 0x000007b0, 0x00000008);
2718 nv_icmd(dev, 0x000007f6, 0x00000001);
2719 nv_icmd(dev, 0x000006b2, 0x00000055);
2720 nv_icmd(dev, 0x000007ad, 0x00000003);
2721 nv_icmd(dev, 0x00000937, 0x00000001);
2722 nv_icmd(dev, 0x00000971, 0x00000008);
2723 nv_icmd(dev, 0x00000972, 0x00000040);
2724 nv_icmd(dev, 0x00000973, 0x0000012c);
2725 nv_icmd(dev, 0x0000097c, 0x00000040);
2726 nv_icmd(dev, 0x00000979, 0x00000003);
2727 nv_icmd(dev, 0x00000975, 0x00000020);
2728 nv_icmd(dev, 0x00000976, 0x00000001);
2729 nv_icmd(dev, 0x00000977, 0x00000020);
2730 nv_icmd(dev, 0x00000978, 0x00000001);
2731 nv_icmd(dev, 0x00000957, 0x00000003);
2732 nv_icmd(dev, 0x0000095e, 0x20164010);
2733 nv_icmd(dev, 0x0000095f, 0x00000020);
2734 if (dev_priv->chipset == 0xd9)
2735 nv_icmd(dev, 0x0000097d, 0x00000020);
2736 nv_icmd(dev, 0x00000683, 0x00000006);
2737 nv_icmd(dev, 0x00000685, 0x003fffff);
2738 nv_icmd(dev, 0x00000687, 0x00000c48);
2739 nv_icmd(dev, 0x000006a0, 0x00000005);
2740 nv_icmd(dev, 0x00000840, 0x00300008);
2741 nv_icmd(dev, 0x00000841, 0x04000080);
2742 nv_icmd(dev, 0x00000842, 0x00300008);
2743 nv_icmd(dev, 0x00000843, 0x04000080);
2744 nv_icmd(dev, 0x00000818, 0x00000000);
2745 nv_icmd(dev, 0x00000819, 0x00000000);
2746 nv_icmd(dev, 0x0000081a, 0x00000000);
2747 nv_icmd(dev, 0x0000081b, 0x00000000);
2748 nv_icmd(dev, 0x0000081c, 0x00000000);
2749 nv_icmd(dev, 0x0000081d, 0x00000000);
2750 nv_icmd(dev, 0x0000081e, 0x00000000);
2751 nv_icmd(dev, 0x0000081f, 0x00000000);
2752 nv_icmd(dev, 0x00000848, 0x00000000);
2753 nv_icmd(dev, 0x00000849, 0x00000000);
2754 nv_icmd(dev, 0x0000084a, 0x00000000);
2755 nv_icmd(dev, 0x0000084b, 0x00000000);
2756 nv_icmd(dev, 0x0000084c, 0x00000000);
2757 nv_icmd(dev, 0x0000084d, 0x00000000);
2758 nv_icmd(dev, 0x0000084e, 0x00000000);
2759 nv_icmd(dev, 0x0000084f, 0x00000000);
2760 nv_icmd(dev, 0x00000850, 0x00000000);
2761 nv_icmd(dev, 0x00000851, 0x00000000);
2762 nv_icmd(dev, 0x00000852, 0x00000000);
2763 nv_icmd(dev, 0x00000853, 0x00000000);
2764 nv_icmd(dev, 0x00000854, 0x00000000);
2765 nv_icmd(dev, 0x00000855, 0x00000000);
2766 nv_icmd(dev, 0x00000856, 0x00000000);
2767 nv_icmd(dev, 0x00000857, 0x00000000);
2768 nv_icmd(dev, 0x00000738, 0x00000000);
2769 nv_icmd(dev, 0x000006aa, 0x00000001);
2770 nv_icmd(dev, 0x000006ab, 0x00000002);
2771 nv_icmd(dev, 0x000006ac, 0x00000080);
2772 nv_icmd(dev, 0x000006ad, 0x00000100);
2773 nv_icmd(dev, 0x000006ae, 0x00000100);
2774 nv_icmd(dev, 0x000006b1, 0x00000011);
2775 nv_icmd(dev, 0x000006bb, 0x000000cf);
2776 nv_icmd(dev, 0x000006ce, 0x2a712488);
2777 nv_icmd(dev, 0x00000739, 0x4085c000);
2778 nv_icmd(dev, 0x0000073a, 0x00000080);
2779 nv_icmd(dev, 0x00000786, 0x80000100);
2780 nv_icmd(dev, 0x0000073c, 0x00010100);
2781 nv_icmd(dev, 0x0000073d, 0x02800000);
2782 nv_icmd(dev, 0x00000787, 0x000000cf);
2783 nv_icmd(dev, 0x0000078c, 0x00000008);
2784 nv_icmd(dev, 0x00000792, 0x00000001);
2785 nv_icmd(dev, 0x00000794, 0x00000001);
2786 nv_icmd(dev, 0x00000795, 0x00000001);
2787 nv_icmd(dev, 0x00000796, 0x00000001);
2788 nv_icmd(dev, 0x00000797, 0x000000cf);
2789 nv_icmd(dev, 0x00000836, 0x00000001);
2790 nv_icmd(dev, 0x0000079a, 0x00000002);
2791 nv_icmd(dev, 0x00000833, 0x04444480);
2792 nv_icmd(dev, 0x000007a1, 0x00000001);
2793 nv_icmd(dev, 0x000007a3, 0x00000001);
2794 nv_icmd(dev, 0x000007a4, 0x00000001);
2795 nv_icmd(dev, 0x000007a5, 0x00000001);
2796 nv_icmd(dev, 0x00000831, 0x00000004);
2797 nv_icmd(dev, 0x0000080c, 0x00000002);
2798 nv_icmd(dev, 0x0000080d, 0x00000100);
2799 nv_icmd(dev, 0x0000080e, 0x00000100);
2800 nv_icmd(dev, 0x0000080f, 0x00000001);
2801 nv_icmd(dev, 0x00000823, 0x00000002);
2802 nv_icmd(dev, 0x00000824, 0x00000100);
2803 nv_icmd(dev, 0x00000825, 0x00000100);
2804 nv_icmd(dev, 0x00000826, 0x00000001);
2805 nv_icmd(dev, 0x0000095d, 0x00000001);
2806 nv_icmd(dev, 0x0000082b, 0x00000004);
2807 nv_icmd(dev, 0x00000942, 0x00010001);
2808 nv_icmd(dev, 0x00000943, 0x00000001);
2809 nv_icmd(dev, 0x00000944, 0x00000022);
2810 nv_icmd(dev, 0x000007c5, 0x00010001);
2811 nv_icmd(dev, 0x00000834, 0x00000001);
2812 nv_icmd(dev, 0x000007c7, 0x00000001);
2813 nv_icmd(dev, 0x0000c1b0, 0x0000000f);
2814 nv_icmd(dev, 0x0000c1b1, 0x0000000f);
2815 nv_icmd(dev, 0x0000c1b2, 0x0000000f);
2816 nv_icmd(dev, 0x0000c1b3, 0x0000000f);
2817 nv_icmd(dev, 0x0000c1b4, 0x0000000f);
2818 nv_icmd(dev, 0x0000c1b5, 0x0000000f);
2819 nv_icmd(dev, 0x0000c1b6, 0x0000000f);
2820 nv_icmd(dev, 0x0000c1b7, 0x0000000f);
2821 nv_icmd(dev, 0x0000c1b8, 0x0fac6881);
2822 nv_icmd(dev, 0x0000c1b9, 0x00fac688);
2823 nv_icmd(dev, 0x0001e100, 0x00000001);
2824 nv_icmd(dev, 0x00001000, 0x00000002);
2825 nv_icmd(dev, 0x000006aa, 0x00000001);
2826 nv_icmd(dev, 0x000006ad, 0x00000100);
2827 nv_icmd(dev, 0x000006ae, 0x00000100);
2828 nv_icmd(dev, 0x000006b1, 0x00000011);
2829 nv_icmd(dev, 0x0000078c, 0x00000008);
2830 nv_icmd(dev, 0x00000792, 0x00000001);
2831 nv_icmd(dev, 0x00000794, 0x00000001);
2832 nv_icmd(dev, 0x00000795, 0x00000001);
2833 nv_icmd(dev, 0x00000796, 0x00000001);
2834 nv_icmd(dev, 0x00000797, 0x000000cf);
2835 nv_icmd(dev, 0x0000079a, 0x00000002);
2836 nv_icmd(dev, 0x00000833, 0x04444480);
2837 nv_icmd(dev, 0x000007a1, 0x00000001);
2838 nv_icmd(dev, 0x000007a3, 0x00000001);
2839 nv_icmd(dev, 0x000007a4, 0x00000001);
2840 nv_icmd(dev, 0x000007a5, 0x00000001);
2841 nv_icmd(dev, 0x00000831, 0x00000004);
2842 nv_icmd(dev, 0x0001e100, 0x00000001);
2843 nv_icmd(dev, 0x00001000, 0x00000014);
2844 nv_icmd(dev, 0x00000351, 0x00000100);
2845 nv_icmd(dev, 0x00000957, 0x00000003);
2846 nv_icmd(dev, 0x0000095d, 0x00000001);
2847 nv_icmd(dev, 0x0000082b, 0x00000004);
2848 nv_icmd(dev, 0x00000942, 0x00010001);
2849 nv_icmd(dev, 0x00000943, 0x00000001);
2850 nv_icmd(dev, 0x000007c5, 0x00010001);
2851 nv_icmd(dev, 0x00000834, 0x00000001);
2852 nv_icmd(dev, 0x000007c7, 0x00000001);
2853 nv_icmd(dev, 0x0001e100, 0x00000001);
2854 nv_icmd(dev, 0x00001000, 0x00000001);
2855 nv_icmd(dev, 0x0000080c, 0x00000002);
2856 nv_icmd(dev, 0x0000080d, 0x00000100);
2857 nv_icmd(dev, 0x0000080e, 0x00000100);
2858 nv_icmd(dev, 0x0000080f, 0x00000001);
2859 nv_icmd(dev, 0x00000823, 0x00000002);
2860 nv_icmd(dev, 0x00000824, 0x00000100);
2861 nv_icmd(dev, 0x00000825, 0x00000100);
2862 nv_icmd(dev, 0x00000826, 0x00000001);
2863 nv_icmd(dev, 0x0001e100, 0x00000001);
2864 nv_wr32(dev, 0x400208, 0x00000000);
2865 nv_wr32(dev, 0x404154, 0x00000400);
2866
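	/* per-class method state; the 9197/9297 3d subclasses only exist
	 * on newer Fermi revisions, hence the class checks
	 */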
2867 nvc0_grctx_generate_9097(dev);
2868 if (fermi >= 0x9197)
2869 nvc0_grctx_generate_9197(dev);
2870 if (fermi >= 0x9297)
2871 nvc0_grctx_generate_9297(dev);
2872 nvc0_grctx_generate_902d(dev);
2873 nvc0_grctx_generate_9039(dev);
2874 nvc0_grctx_generate_90c0(dev);
2875
2876 nv_wr32(dev, 0x000260, r000260);
2877 return 0;
2878}
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
deleted file mode 100644
index f5fac7cbb78d..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ /dev/null
@@ -1,223 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26
27#include "nouveau_drv.h"
28#include "nouveau_vm.h"
29
30struct nvc0_instmem_priv {
31 struct nouveau_gpuobj *bar1_pgd;
32 struct nouveau_channel *bar1;
33 struct nouveau_gpuobj *bar3_pgd;
34 struct nouveau_channel *bar3;
35};
36
37int
38nvc0_instmem_suspend(struct drm_device *dev)
39{
40 struct drm_nouveau_private *dev_priv = dev->dev_private;
41
42 dev_priv->ramin_available = false;
43 return 0;
44}
45
46void
47nvc0_instmem_resume(struct drm_device *dev)
48{
49 struct drm_nouveau_private *dev_priv = dev->dev_private;
50 struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
51
52 nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
53 nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
54 nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
55 dev_priv->ramin_available = true;
56}
57
58static void
59nvc0_channel_del(struct nouveau_channel **pchan)
60{
61 struct nouveau_channel *chan;
62
63 chan = *pchan;
64 *pchan = NULL;
65 if (!chan)
66 return;
67
68 nouveau_vm_ref(NULL, &chan->vm, NULL);
69 if (drm_mm_initialized(&chan->ramin_heap))
70 drm_mm_takedown(&chan->ramin_heap);
71 nouveau_gpuobj_ref(NULL, &chan->ramin);
72 kfree(chan);
73}
74
75static int
76nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
77 struct nouveau_channel **pchan,
78 struct nouveau_gpuobj *pgd, u64 vm_size)
79{
80 struct nouveau_channel *chan;
81 int ret;
82
83 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
84 if (!chan)
85 return -ENOMEM;
86 chan->dev = dev;
87
88 ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
89 if (ret) {
90 nvc0_channel_del(&chan);
91 return ret;
92 }
93
94 ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
95 if (ret) {
96 nvc0_channel_del(&chan);
97 return ret;
98 }
99
100 ret = nouveau_vm_ref(vm, &chan->vm, NULL);
101 if (ret) {
102 nvc0_channel_del(&chan);
103 return ret;
104 }
105
106 nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
107 nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
108 nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
109 nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));
110
111 *pchan = chan;
112 return 0;
113}
114
115int
116nvc0_instmem_init(struct drm_device *dev)
117{
118 struct drm_nouveau_private *dev_priv = dev->dev_private;
119 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
120 struct pci_dev *pdev = dev->pdev;
121 struct nvc0_instmem_priv *priv;
122 struct nouveau_vm *vm = NULL;
123 int ret;
124
125 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
126 if (!priv)
127 return -ENOMEM;
128 pinstmem->priv = priv;
129
130 /* BAR3 VM */
131 ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
132 &dev_priv->bar3_vm);
133 if (ret)
134 goto error;
135
136 ret = nouveau_gpuobj_new(dev, NULL,
137 (pci_resource_len(pdev, 3) >> 12) * 8, 0,
138 NVOBJ_FLAG_DONT_MAP |
139 NVOBJ_FLAG_ZERO_ALLOC,
140 &dev_priv->bar3_vm->pgt[0].obj[0]);
141 if (ret)
142 goto error;
143 dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
144
145 nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
146
147 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
148 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
149 if (ret)
150 goto error;
151
152 ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
153 if (ret)
154 goto error;
155 nouveau_vm_ref(NULL, &vm, NULL);
156
157 ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
158 priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
159 if (ret)
160 goto error;
161
162 /* BAR1 VM */
163 ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
164 if (ret)
165 goto error;
166
167 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
168 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
169 if (ret)
170 goto error;
171
172 ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
173 if (ret)
174 goto error;
175 nouveau_vm_ref(NULL, &vm, NULL);
176
177 ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
178 priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
179 if (ret)
180 goto error;
181
182 /* channel vm */
183 ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
184 &dev_priv->chan_vm);
185 if (ret)
186 goto error;
187
188 nvc0_instmem_resume(dev);
189 return 0;
190error:
191 nvc0_instmem_takedown(dev);
192 return ret;
193}
194
195void
196nvc0_instmem_takedown(struct drm_device *dev)
197{
198 struct drm_nouveau_private *dev_priv = dev->dev_private;
199 struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
200 struct nouveau_vm *vm = NULL;
201
202 nvc0_instmem_suspend(dev);
203
204 nv_wr32(dev, 0x1704, 0x00000000);
205 nv_wr32(dev, 0x1714, 0x00000000);
206
207 nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
208
209 nvc0_channel_del(&priv->bar1);
210 nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
211 nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);
212
213 nvc0_channel_del(&priv->bar3);
214 nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
215 nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
216 nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
217 nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
218 nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
219
220 dev_priv->engine.instmem.priv = NULL;
221 kfree(priv);
222}
223
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index 51cee2103544..0d34eb581179 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -22,18 +22,24 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <drm/drmP.h> 25#include "nouveau_drm.h"
26#include "nouveau_drv.h"
27#include "nouveau_bios.h" 26#include "nouveau_bios.h"
28#include "nouveau_pm.h" 27#include "nouveau_pm.h"
29 28
29#include <subdev/bios/pll.h>
30#include <subdev/bios.h>
31#include <subdev/clock.h>
32#include <subdev/timer.h>
33#include <subdev/fb.h>
34
30static u32 read_div(struct drm_device *, int, u32, u32); 35static u32 read_div(struct drm_device *, int, u32, u32);
31static u32 read_pll(struct drm_device *, u32); 36static u32 read_pll(struct drm_device *, u32);
32 37
33static u32 38static u32
34read_vco(struct drm_device *dev, u32 dsrc) 39read_vco(struct drm_device *dev, u32 dsrc)
35{ 40{
36 u32 ssrc = nv_rd32(dev, dsrc); 41 struct nouveau_device *device = nouveau_dev(dev);
42 u32 ssrc = nv_rd32(device, dsrc);
37 if (!(ssrc & 0x00000100)) 43 if (!(ssrc & 0x00000100))
38 return read_pll(dev, 0x00e800); 44 return read_pll(dev, 0x00e800);
39 return read_pll(dev, 0x00e820); 45 return read_pll(dev, 0x00e820);
@@ -42,8 +48,9 @@ read_vco(struct drm_device *dev, u32 dsrc)
42static u32 48static u32
43read_pll(struct drm_device *dev, u32 pll) 49read_pll(struct drm_device *dev, u32 pll)
44{ 50{
45 u32 ctrl = nv_rd32(dev, pll + 0); 51 struct nouveau_device *device = nouveau_dev(dev);
46 u32 coef = nv_rd32(dev, pll + 4); 52 u32 ctrl = nv_rd32(device, pll + 0);
53 u32 coef = nv_rd32(device, pll + 4);
47 u32 P = (coef & 0x003f0000) >> 16; 54 u32 P = (coef & 0x003f0000) >> 16;
48 u32 N = (coef & 0x0000ff00) >> 8; 55 u32 N = (coef & 0x0000ff00) >> 8;
49 u32 M = (coef & 0x000000ff) >> 0; 56 u32 M = (coef & 0x000000ff) >> 0;
@@ -83,8 +90,9 @@ read_pll(struct drm_device *dev, u32 pll)
83static u32 90static u32
84read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl) 91read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
85{ 92{
86 u32 ssrc = nv_rd32(dev, dsrc + (doff * 4)); 93 struct nouveau_device *device = nouveau_dev(dev);
87 u32 sctl = nv_rd32(dev, dctl + (doff * 4)); 94 u32 ssrc = nv_rd32(device, dsrc + (doff * 4));
95 u32 sctl = nv_rd32(device, dctl + (doff * 4));
88 96
89 switch (ssrc & 0x00000003) { 97 switch (ssrc & 0x00000003) {
90 case 0: 98 case 0:
@@ -109,7 +117,8 @@ read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
109static u32 117static u32
110read_mem(struct drm_device *dev) 118read_mem(struct drm_device *dev)
111{ 119{
112 u32 ssel = nv_rd32(dev, 0x1373f0); 120 struct nouveau_device *device = nouveau_dev(dev);
121 u32 ssel = nv_rd32(device, 0x1373f0);
113 if (ssel & 0x00000001) 122 if (ssel & 0x00000001)
114 return read_div(dev, 0, 0x137300, 0x137310); 123 return read_div(dev, 0, 0x137300, 0x137310);
115 return read_pll(dev, 0x132000); 124 return read_pll(dev, 0x132000);
@@ -118,8 +127,9 @@ read_mem(struct drm_device *dev)
118static u32 127static u32
119read_clk(struct drm_device *dev, int clk) 128read_clk(struct drm_device *dev, int clk)
120{ 129{
121 u32 sctl = nv_rd32(dev, 0x137250 + (clk * 4)); 130 struct nouveau_device *device = nouveau_dev(dev);
122 u32 ssel = nv_rd32(dev, 0x137100); 131 u32 sctl = nv_rd32(device, 0x137250 + (clk * 4));
132 u32 ssel = nv_rd32(device, 0x137100);
123 u32 sclk, sdiv; 133 u32 sclk, sdiv;
124 134
125 if (ssel & (1 << clk)) { 135 if (ssel & (1 << clk)) {
@@ -212,10 +222,12 @@ calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
212static u32 222static u32
213calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef) 223calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
214{ 224{
215 struct pll_lims limits; 225 struct nouveau_device *device = nouveau_dev(dev);
226 struct nouveau_bios *bios = nouveau_bios(device);
227 struct nvbios_pll limits;
216 int N, M, P, ret; 228 int N, M, P, ret;
217 229
218 ret = get_pll_limits(dev, 0x137000 + (clk * 0x20), &limits); 230 ret = nvbios_pll_parse(bios, 0x137000 + (clk * 0x20), &limits);
219 if (ret) 231 if (ret)
220 return 0; 232 return 0;
221 233
@@ -308,31 +320,33 @@ calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
 static int
 calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
 {
-	struct pll_lims pll;
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_bios *bios = nouveau_bios(device);
+	struct nvbios_pll pll;
 	int N, M, P, ret;
 	u32 ctrl;
 
 	/* mclk pll input freq comes from another pll, make sure it's on */
-	ctrl = nv_rd32(dev, 0x132020);
+	ctrl = nv_rd32(device, 0x132020);
 	if (!(ctrl & 0x00000001)) {
 		/* if not, program it to 567MHz. nfi where this value comes
 		 * from - it looks like it's in the pll limits table for
 		 * 132000 but the binary driver ignores all my attempts to
 		 * change this value.
 		 */
-		nv_wr32(dev, 0x137320, 0x00000103);
-		nv_wr32(dev, 0x137330, 0x81200606);
-		nv_wait(dev, 0x132020, 0x00010000, 0x00010000);
-		nv_wr32(dev, 0x132024, 0x0001150f);
-		nv_mask(dev, 0x132020, 0x00000001, 0x00000001);
-		nv_wait(dev, 0x137390, 0x00020000, 0x00020000);
-		nv_mask(dev, 0x132020, 0x00000004, 0x00000004);
+		nv_wr32(device, 0x137320, 0x00000103);
+		nv_wr32(device, 0x137330, 0x81200606);
+		nv_wait(device, 0x132020, 0x00010000, 0x00010000);
+		nv_wr32(device, 0x132024, 0x0001150f);
+		nv_mask(device, 0x132020, 0x00000001, 0x00000001);
+		nv_wait(device, 0x137390, 0x00020000, 0x00020000);
+		nv_mask(device, 0x132020, 0x00000004, 0x00000004);
 	}
 
 	/* for the moment, until the clock tree is better understood, use
 	 * pll mode for all clock frequencies
 	 */
-	ret = get_pll_limits(dev, 0x132000, &pll);
+	ret = nvbios_pll_parse(bios, 0x132000, &pll);
 	if (ret == 0) {
 		pll.refclk = read_pll(dev, 0x132020);
 		if (pll.refclk) {
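[annotation: the nv_wait() calls above poll a register until the masked value matches, bailing out on a deadline. A rough sketch of the semantics assumed throughout this file; the 2ms budget is illustrative, the real helper takes its timeout from the core:]

static bool
poll_mask(struct nouveau_device *device, u32 reg, u32 mask, u32 data)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(2000);

	while (time_before(jiffies, timeout)) {
		if ((nv_rd32(device, reg) & mask) == data)
			return true;
		cpu_relax();
	}
	return false;	/* caller decides whether a timeout is fatal */
}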
@@ -350,7 +364,7 @@ calc_mem(struct drm_device *dev, struct nvc0_pm_clock *info, u32 freq)
 void *
 nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nvc0_pm_state *info;
 	int ret;
 
@@ -364,7 +378,7 @@ nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	 * are always the same freq with the binary driver even when the
 	 * performance table says they should differ.
 	 */
-	if (dev_priv->chipset == 0xd9)
+	if (device->chipset == 0xd9)
 		perflvl->rop = 0;
 
 	if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
@@ -394,38 +408,40 @@ nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 static void
 prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+
 	/* program dividers at 137160/1371d0 first */
 	if (clk < 7 && !info->ssel) {
-		nv_mask(dev, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
-		nv_wr32(dev, 0x137160 + (clk * 0x04), info->dsrc);
+		nv_mask(device, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+		nv_wr32(device, 0x137160 + (clk * 0x04), info->dsrc);
 	}
 
 	/* switch clock to non-pll mode */
-	nv_mask(dev, 0x137100, (1 << clk), 0x00000000);
-	nv_wait(dev, 0x137100, (1 << clk), 0x00000000);
+	nv_mask(device, 0x137100, (1 << clk), 0x00000000);
+	nv_wait(device, 0x137100, (1 << clk), 0x00000000);
 
 	/* reprogram pll */
 	if (clk < 7) {
 		/* make sure it's disabled first... */
 		u32 base = 0x137000 + (clk * 0x20);
-		u32 ctrl = nv_rd32(dev, base + 0x00);
+		u32 ctrl = nv_rd32(device, base + 0x00);
 		if (ctrl & 0x00000001) {
-			nv_mask(dev, base + 0x00, 0x00000004, 0x00000000);
-			nv_mask(dev, base + 0x00, 0x00000001, 0x00000000);
+			nv_mask(device, base + 0x00, 0x00000004, 0x00000000);
+			nv_mask(device, base + 0x00, 0x00000001, 0x00000000);
 		}
 		/* program it to new values, if necessary */
 		if (info->ssel) {
-			nv_wr32(dev, base + 0x04, info->coef);
-			nv_mask(dev, base + 0x00, 0x00000001, 0x00000001);
-			nv_wait(dev, base + 0x00, 0x00020000, 0x00020000);
-			nv_mask(dev, base + 0x00, 0x00020004, 0x00000004);
+			nv_wr32(device, base + 0x04, info->coef);
+			nv_mask(device, base + 0x00, 0x00000001, 0x00000001);
+			nv_wait(device, base + 0x00, 0x00020000, 0x00020000);
+			nv_mask(device, base + 0x00, 0x00020004, 0x00000004);
 		}
 	}
 
 	/* select pll/non-pll mode, and program final clock divider */
-	nv_mask(dev, 0x137100, (1 << clk), info->ssel);
-	nv_wait(dev, 0x137100, (1 << clk), info->ssel);
-	nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+	nv_mask(device, 0x137100, (1 << clk), info->ssel);
+	nv_wait(device, 0x137100, (1 << clk), info->ssel);
+	nv_mask(device, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
 }
 
 static void
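[annotation: prog_clk() deliberately parks the mux on the bypass source before touching the PLL, so the clock never runs from a half-programmed divider. On the caller side, nvc0_pm_clocks_set() (not shown here) is assumed to simply walk the per-engine array prepared by nvc0_pm_clocks_pre(); a sketch with illustrative field names:]

	/* illustrative only: apply each engine clock computed earlier */
	for (i = 0; i < 16; i++) {
		if (info->eng[i].freq)
			prog_clk(dev, i, &info->eng[i]);
	}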
@@ -441,7 +457,8 @@ mclk_refresh(struct nouveau_mem_exec_func *exec)
 static void
 mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
 {
-	nv_wr32(exec->dev, 0x10f210, enable ? 0x80000000 : 0x00000000);
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	nv_wr32(device, 0x10f210, enable ? 0x80000000 : 0x00000000);
 }
 
 static void
@@ -458,83 +475,84 @@ mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
 static u32
 mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
 {
-	struct drm_device *dev = exec->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	if (pfb->ram.type != NV_MEM_TYPE_GDDR5) {
 		if (mr <= 1)
-			return nv_rd32(dev, 0x10f300 + ((mr - 0) * 4));
-		return nv_rd32(dev, 0x10f320 + ((mr - 2) * 4));
+			return nv_rd32(device, 0x10f300 + ((mr - 0) * 4));
+		return nv_rd32(device, 0x10f320 + ((mr - 2) * 4));
 	} else {
 		if (mr == 0)
-			return nv_rd32(dev, 0x10f300 + (mr * 4));
+			return nv_rd32(device, 0x10f300 + (mr * 4));
 		else
 		if (mr <= 7)
-			return nv_rd32(dev, 0x10f32c + (mr * 4));
-		return nv_rd32(dev, 0x10f34c);
+			return nv_rd32(device, 0x10f32c + (mr * 4));
+		return nv_rd32(device, 0x10f34c);
 	}
 }
 
 static void
 mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
 {
-	struct drm_device *dev = exec->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	if (dev_priv->vram_type != NV_MEM_TYPE_GDDR5) {
+	struct nouveau_device *device = nouveau_dev(exec->dev);
+	struct nouveau_fb *pfb = nouveau_fb(device);
+	if (pfb->ram.type != NV_MEM_TYPE_GDDR5) {
 		if (mr <= 1) {
-			nv_wr32(dev, 0x10f300 + ((mr - 0) * 4), data);
-			if (dev_priv->vram_rank_B)
-				nv_wr32(dev, 0x10f308 + ((mr - 0) * 4), data);
+			nv_wr32(device, 0x10f300 + ((mr - 0) * 4), data);
+			if (pfb->ram.ranks > 1)
+				nv_wr32(device, 0x10f308 + ((mr - 0) * 4), data);
 		} else
 		if (mr <= 3) {
-			nv_wr32(dev, 0x10f320 + ((mr - 2) * 4), data);
-			if (dev_priv->vram_rank_B)
-				nv_wr32(dev, 0x10f328 + ((mr - 2) * 4), data);
+			nv_wr32(device, 0x10f320 + ((mr - 2) * 4), data);
+			if (pfb->ram.ranks > 1)
+				nv_wr32(device, 0x10f328 + ((mr - 2) * 4), data);
 		}
 	} else {
-		if (mr == 0) nv_wr32(dev, 0x10f300 + (mr * 4), data);
-		else if (mr <= 7) nv_wr32(dev, 0x10f32c + (mr * 4), data);
-		else if (mr == 15) nv_wr32(dev, 0x10f34c, data);
+		if (mr == 0) nv_wr32(device, 0x10f300 + (mr * 4), data);
+		else if (mr <= 7) nv_wr32(device, 0x10f32c + (mr * 4), data);
+		else if (mr == 15) nv_wr32(device, 0x10f34c, data);
 	}
 }
 
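[annotation: the branchy register selection in mclk_mrg()/mclk_mrs() encodes a small address map. Restated as a lookup helper, illustrative only, mirroring exactly the offsets visible above:]

static u32
mr_reg(bool gddr5, int mr)
{
	if (!gddr5)
		return mr <= 1 ? 0x10f300 + (mr * 4)
			       : 0x10f320 + ((mr - 2) * 4);
	if (mr == 0)
		return 0x10f300;
	if (mr <= 7)
		return 0x10f32c + (mr * 4);
	return 0x10f34c;		/* MR15 */
}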
 static void
 mclk_clock_set(struct nouveau_mem_exec_func *exec)
 {
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	struct nvc0_pm_state *info = exec->priv;
-	struct drm_device *dev = exec->dev;
-	u32 ctrl = nv_rd32(dev, 0x132000);
+	u32 ctrl = nv_rd32(device, 0x132000);
 
-	nv_wr32(dev, 0x137360, 0x00000001);
-	nv_wr32(dev, 0x137370, 0x00000000);
-	nv_wr32(dev, 0x137380, 0x00000000);
+	nv_wr32(device, 0x137360, 0x00000001);
+	nv_wr32(device, 0x137370, 0x00000000);
+	nv_wr32(device, 0x137380, 0x00000000);
 	if (ctrl & 0x00000001)
-		nv_wr32(dev, 0x132000, (ctrl &= ~0x00000001));
+		nv_wr32(device, 0x132000, (ctrl &= ~0x00000001));
 
-	nv_wr32(dev, 0x132004, info->mem.coef);
-	nv_wr32(dev, 0x132000, (ctrl |= 0x00000001));
-	nv_wait(dev, 0x137390, 0x00000002, 0x00000002);
-	nv_wr32(dev, 0x132018, 0x00005000);
+	nv_wr32(device, 0x132004, info->mem.coef);
+	nv_wr32(device, 0x132000, (ctrl |= 0x00000001));
+	nv_wait(device, 0x137390, 0x00000002, 0x00000002);
+	nv_wr32(device, 0x132018, 0x00005000);
 
-	nv_wr32(dev, 0x137370, 0x00000001);
-	nv_wr32(dev, 0x137380, 0x00000001);
-	nv_wr32(dev, 0x137360, 0x00000000);
+	nv_wr32(device, 0x137370, 0x00000001);
+	nv_wr32(device, 0x137380, 0x00000001);
+	nv_wr32(device, 0x137360, 0x00000000);
 }
 
 static void
 mclk_timing_set(struct nouveau_mem_exec_func *exec)
 {
+	struct nouveau_device *device = nouveau_dev(exec->dev);
 	struct nvc0_pm_state *info = exec->priv;
 	struct nouveau_pm_level *perflvl = info->perflvl;
 	int i;
 
 	for (i = 0; i < 5; i++)
-		nv_wr32(exec->dev, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
+		nv_wr32(device, 0x10f290 + (i * 4), perflvl->timing.reg[i]);
 }
 
 static void
 prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nouveau_mem_exec_func exec = {
 		.dev = dev,
 		.precharge = mclk_precharge,
@@ -549,17 +567,17 @@ prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
 		.priv = info
 	};
 
-	if (dev_priv->chipset < 0xd0)
-		nv_wr32(dev, 0x611200, 0x00003300);
+	if (device->chipset < 0xd0)
+		nv_wr32(device, 0x611200, 0x00003300);
 	else
-		nv_wr32(dev, 0x62c000, 0x03030000);
+		nv_wr32(device, 0x62c000, 0x03030000);
 
 	nouveau_mem_exec(&exec, info->perflvl);
 
-	if (dev_priv->chipset < 0xd0)
-		nv_wr32(dev, 0x611200, 0x00003330);
+	if (device->chipset < 0xd0)
+		nv_wr32(device, 0x611200, 0x00003330);
 	else
-		nv_wr32(dev, 0x62c000, 0x03030300);
+		nv_wr32(device, 0x62c000, 0x03030300);
 }
 int
 nvc0_pm_clocks_set(struct drm_device *dev, void *data)
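[annotation: every hunk in this file follows the same conversion: register accessors now take the core nouveau_device rather than the drm_device, and per-chip state (chipset, ram type/ranks) moves behind subdev objects. The lookup chain is assumed, based on the accessors used above, to resolve roughly as follows; a sketch, not the canonical definitions:]

	struct nouveau_drm *drm = nouveau_drm(dev);	/* drm_device -> drm state */
	struct nouveau_device *device = nv_device(drm->device);
	struct nouveau_fb *pfb = nouveau_fb(device);	/* fb subdev: ram info */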
diff --git a/drivers/gpu/drm/nouveau/nvc0_software.c b/drivers/gpu/drm/nouveau/nvc0_software.c
deleted file mode 100644
index 940652e7fafa..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_software.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
-#include "nouveau_software.h"
-
-#include "nv50_display.h"
-
-struct nvc0_software_priv {
-	struct nouveau_software_priv base;
-};
-
-struct nvc0_software_chan {
-	struct nouveau_software_chan base;
-	struct nouveau_vma dispc_vma[4];
-};
-
-u64
-nvc0_software_crtc(struct nouveau_channel *chan, int crtc)
-{
-	struct nvc0_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
-	return pch->dispc_vma[crtc].offset;
-}
-
-static int
-nvc0_software_context_new(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
-	struct nvc0_software_chan *pch;
-	int ret = 0, i;
-
-	pch = kzalloc(sizeof(*pch), GFP_KERNEL);
-	if (!pch)
-		return -ENOMEM;
-
-	nouveau_software_context_new(&pch->base);
-	chan->engctx[engine] = pch;
-
-	/* map display semaphore buffers into channel's vm */
-	for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
-		struct nouveau_bo *bo;
-		if (dev_priv->card_type >= NV_D0)
-			bo = nvd0_display_crtc_sema(dev, i);
-		else
-			bo = nv50_display(dev)->crtc[i].sem.bo;
-
-		ret = nouveau_bo_vma_add(bo, chan->vm, &pch->dispc_vma[i]);
-	}
-
-	if (ret)
-		psw->base.base.context_del(chan, engine);
-	return ret;
-}
-
-static void
-nvc0_software_context_del(struct nouveau_channel *chan, int engine)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvc0_software_chan *pch = chan->engctx[engine];
-	int i;
-
-	if (dev_priv->card_type >= NV_D0) {
-		for (i = 0; i < dev->mode_config.num_crtc; i++) {
-			struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
-			nouveau_bo_vma_del(bo, &pch->dispc_vma[i]);
-		}
-	} else
-	if (dev_priv->card_type >= NV_50) {
-		struct nv50_display *disp = nv50_display(dev);
-		for (i = 0; i < dev->mode_config.num_crtc; i++) {
-			struct nv50_display_crtc *dispc = &disp->crtc[i];
-			nouveau_bo_vma_del(dispc->sem.bo, &pch->dispc_vma[i]);
-		}
-	}
-
-	chan->engctx[engine] = NULL;
-	kfree(pch);
-}
-
-static int
-nvc0_software_object_new(struct nouveau_channel *chan, int engine,
-			 u32 handle, u16 class)
-{
-	return 0;
-}
-
-static int
-nvc0_software_init(struct drm_device *dev, int engine)
-{
-	return 0;
-}
-
-static int
-nvc0_software_fini(struct drm_device *dev, int engine, bool suspend)
-{
-	return 0;
-}
-
-static void
-nvc0_software_destroy(struct drm_device *dev, int engine)
-{
-	struct nvc0_software_priv *psw = nv_engine(dev, engine);
-
-	NVOBJ_ENGINE_DEL(dev, SW);
-	kfree(psw);
-}
-
-int
-nvc0_software_create(struct drm_device *dev)
-{
-	struct nvc0_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
-	if (!psw)
-		return -ENOMEM;
-
-	psw->base.base.destroy = nvc0_software_destroy;
-	psw->base.base.init = nvc0_software_init;
-	psw->base.base.fini = nvc0_software_fini;
-	psw->base.base.context_new = nvc0_software_context_new;
-	psw->base.base.context_del = nvc0_software_context_del;
-	psw->base.base.object_new = nvc0_software_object_new;
-	nouveau_software_create(&psw->base);
-
-	NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
-	NVOBJ_CLASS(dev, 0x906e, SW);
-	return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
deleted file mode 100644
index 4d62a1d95782..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-
-/* 0 = unsupported
- * 1 = non-compressed
- * 3 = compressed
- */
-static const u8 types[256] = {
-	1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
-	0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
-	3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
-	3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
-	3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
-	3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
-	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
-};
-
-bool
-nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
-{
-	u8 memtype = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
-	return likely((types[memtype] == 1));
-}
-
-int
-nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
-	      u32 type, struct nouveau_mem **pmem)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
-	struct nouveau_mm_node *r;
-	struct nouveau_mem *mem;
-	int ret;
-
-	size >>= 12;
-	align >>= 12;
-	ncmin >>= 12;
-
-	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
-	if (!mem)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&mem->regions);
-	mem->dev = dev_priv->dev;
-	mem->memtype = (type & 0xff);
-	mem->size = size;
-
-	mutex_lock(&mm->mutex);
-	do {
-		ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
-		if (ret) {
-			mutex_unlock(&mm->mutex);
-			nv50_vram_del(dev, &mem);
-			return ret;
-		}
-
-		list_add_tail(&r->rl_entry, &mem->regions);
-		size -= r->length;
-	} while (size);
-	mutex_unlock(&mm->mutex);
-
-	r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
-	mem->offset = (u64)r->offset << 12;
-	*pmem = mem;
-	return 0;
-}
-
-int
-nvc0_vram_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
-	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
-	u32 parts = nv_rd32(dev, 0x022438);
-	u32 pmask = nv_rd32(dev, 0x022554);
-	u32 bsize = nv_rd32(dev, 0x10f20c);
-	u32 offset, length;
-	bool uniform = true;
-	int ret, part;
-
-	NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
-	NV_DEBUG(dev, "parts 0x%08x mask 0x%08x\n", parts, pmask);
-
-	dev_priv->vram_type = nouveau_mem_vbios_type(dev);
-	dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x10f200) & 0x00000004);
-
-	/* read amount of vram attached to each memory controller */
-	for (part = 0; part < parts; part++) {
-		if (!(pmask & (1 << part))) {
-			u32 psize = nv_rd32(dev, 0x11020c + (part * 0x1000));
-			if (psize != bsize) {
-				if (psize < bsize)
-					bsize = psize;
-				uniform = false;
-			}
-
-			NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
-			dev_priv->vram_size += (u64)psize << 20;
-		}
-	}
-
-	/* if all controllers have the same amount attached, there's no holes */
-	if (uniform) {
-		offset = rsvd_head;
-		length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
-		return nouveau_mm_init(&vram->mm, offset, length, 1);
-	}
-
-	/* otherwise, address lowest common amount from 0GiB */
-	ret = nouveau_mm_init(&vram->mm, rsvd_head, (bsize << 8) * parts, 1);
-	if (ret)
-		return ret;
-
-	/* and the rest starting from (8GiB + common_size) */
-	offset = (0x0200000000ULL >> 12) + (bsize << 8);
-	length = (dev_priv->vram_size >> 12) - (bsize << 8) - rsvd_tail;
-
-	ret = nouveau_mm_init(&vram->mm, offset, length, 0);
-	if (ret) {
-		nouveau_mm_fini(&vram->mm);
-		return ret;
-	}
-
-	return 0;
-}
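[annotation: the deleted nvc0_vram_init() mixed three units, which the shifts above encode: register 0x10f20c reports per-partition size in MiB, while the allocator works in 4KiB pages. Hence psize << 20 for bytes, vram_size >> 12 for pages, and bsize << 8 because one MiB is 256 pages; the non-uniform layout re-homes the remainder at the 8GiB mark (0x0200000000 >> 12, in pages). A worked example, assuming four partitions reporting 128MiB and one reporting 64MiB:]

	bsize  = 64;			/* lowest common size, in MiB      */
	common = (u64)bsize << 8;	/* 16384 pages == 64MiB            */
	/* low window:  pages [rsvd_head, common * parts)                  */
	/* high window: pages from (0x0200000000 >> 12) + common upwards   */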
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 4b44a3250d4b..c402fca2b2b8 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -27,15 +27,21 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_gem.h"
 #include "nouveau_connector.h"
 #include "nouveau_encoder.h"
 #include "nouveau_crtc.h"
-#include "nouveau_dma.h"
-#include "nouveau_fb.h"
-#include "nouveau_software.h"
+#include "nouveau_fence.h"
 #include "nv50_display.h"
 
+#include <core/gpuobj.h>
+
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/fb.h>
+
 #define EVO_DMA_NR 9
 
 #define EVO_MASTER  (0x00)
@@ -72,8 +78,7 @@ struct nvd0_display {
 static struct nvd0_display *
 nvd0_display(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	return dev_priv->engine.display.priv;
+	return nouveau_display(dev)->priv;
 }
 
 static struct drm_crtc *
@@ -88,55 +93,47 @@ nvd0_display_crtc_get(struct drm_encoder *encoder)
 static inline int
 evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	int ret = 0;
-	nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
-	nv_wr32(dev, 0x610704 + (id * 0x10), data);
-	nv_mask(dev, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
-	if (!nv_wait(dev, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
+	nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
+	nv_wr32(device, 0x610704 + (id * 0x10), data);
+	nv_mask(device, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
+	if (!nv_wait(device, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
 		ret = -EBUSY;
-	nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
+	nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
 	return ret;
 }
 
 static u32 *
 evo_wait(struct drm_device *dev, int id, int nr)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvd0_display *disp = nvd0_display(dev);
-	u32 put = nv_rd32(dev, 0x640000 + (id * 0x1000)) / 4;
+	u32 put = nv_rd32(device, 0x640000 + (id * 0x1000)) / 4;
 
 	if (put + nr >= (PAGE_SIZE / 4)) {
 		disp->evo[id].ptr[put] = 0x20000000;
 
-		nv_wr32(dev, 0x640000 + (id * 0x1000), 0x00000000);
-		if (!nv_wait(dev, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
-			NV_ERROR(dev, "evo %d dma stalled\n", id);
+		nv_wr32(device, 0x640000 + (id * 0x1000), 0x00000000);
+		if (!nv_wait(device, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
+			NV_ERROR(drm, "evo %d dma stalled\n", id);
 			return NULL;
 		}
 
 		put = 0;
 	}
 
-	if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
-		NV_INFO(dev, "Evo%d: %p START\n", id, disp->evo[id].ptr + put);
-
 	return disp->evo[id].ptr + put;
 }
 
 static void
 evo_kick(u32 *push, struct drm_device *dev, int id)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nvd0_display *disp = nvd0_display(dev);
 
-	if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) {
-		u32 curp = nv_rd32(dev, 0x640000 + (id * 0x1000)) >> 2;
-		u32 *cur = disp->evo[id].ptr + curp;
-
-		while (cur < push)
-			NV_INFO(dev, "Evo%d: 0x%08x\n", id, *cur++);
-		NV_INFO(dev, "Evo%d: %p KICK!\n", id, push);
-	}
-
-	nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
+	nv_wr32(device, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
 }
 
 #define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
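[annotation: the evo_wait()/evo_kick() pair brackets every push-buffer write in this file: evo_wait() returns a CPU pointer with room for 'nr' words, wrapping the ring with a 0x20000000 jump when needed, and evo_kick() publishes the new PUT offset. A usage sketch following the pattern visible in evo_sync() below; the method number here is illustrative:]

	u32 *push = evo_wait(dev, ch, 8);	/* reserve 8 dwords */
	if (push) {
		evo_mthd(push, 0x0080, 1);	/* method, 1 data word */
		evo_data(push, 0x00000000);
		evo_kick(push, dev, ch);	/* update PUT, start DMA */
	}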
@@ -145,6 +142,8 @@ evo_kick(u32 *push, struct drm_device *dev, int id)
 static int
 evo_init_dma(struct drm_device *dev, int ch)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvd0_display *disp = nvd0_display(dev);
 	u32 flags;
 
@@ -152,68 +151,76 @@ evo_init_dma(struct drm_device *dev, int ch)
 	if (ch == EVO_MASTER)
 		flags |= 0x01000000;
 
-	nv_wr32(dev, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
-	nv_wr32(dev, 0x610498 + (ch * 0x0010), 0x00010000);
-	nv_wr32(dev, 0x61049c + (ch * 0x0010), 0x00000001);
-	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
-	nv_wr32(dev, 0x640000 + (ch * 0x1000), 0x00000000);
-	nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
-	if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
-		NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
-			 nv_rd32(dev, 0x610490 + (ch * 0x0010)));
+	nv_wr32(device, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
+	nv_wr32(device, 0x610498 + (ch * 0x0010), 0x00010000);
+	nv_wr32(device, 0x61049c + (ch * 0x0010), 0x00000001);
+	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
+	nv_wr32(device, 0x640000 + (ch * 0x1000), 0x00000000);
+	nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
+	if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
+		NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
+			 nv_rd32(device, 0x610490 + (ch * 0x0010)));
 		return -EBUSY;
 	}
 
-	nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
-	nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
+	nv_mask(device, 0x610090, (1 << ch), (1 << ch));
+	nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
 	return 0;
 }
 
 static void
 evo_fini_dma(struct drm_device *dev, int ch)
 {
-	if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000010))
+	struct nouveau_device *device = nouveau_dev(dev);
+
+	if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000010))
 		return;
 
-	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
-	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
-	nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
-	nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
-	nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
+	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
+	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
+	nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
+	nv_mask(device, 0x610090, (1 << ch), 0x00000000);
+	nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
 }
 
 static inline void
 evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
 {
-	nv_wr32(dev, 0x640000 + (ch * 0x1000) + mthd, data);
+	struct nouveau_device *device = nouveau_dev(dev);
+	nv_wr32(device, 0x640000 + (ch * 0x1000) + mthd, data);
 }
 
 static int
 evo_init_pio(struct drm_device *dev, int ch)
 {
-	nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000001);
-	if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
-		NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
-			 nv_rd32(dev, 0x610490 + (ch * 0x0010)));
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+
+	nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000001);
+	if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
+		NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
+			 nv_rd32(device, 0x610490 + (ch * 0x0010)));
 		return -EBUSY;
 	}
 
-	nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
-	nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
+	nv_mask(device, 0x610090, (1 << ch), (1 << ch));
+	nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
 	return 0;
 }
 
 static void
 evo_fini_pio(struct drm_device *dev, int ch)
 {
-	if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000001))
+	struct nouveau_device *device = nouveau_dev(dev);
+
+	if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000001))
 		return;
 
-	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
-	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
-	nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
-	nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
-	nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
+	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
+	nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
+	nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
+	nv_mask(device, 0x610090, (1 << ch), 0x00000000);
+	nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
 }
 
 static bool
@@ -225,6 +232,7 @@ evo_sync_wait(void *data)
 static int
 evo_sync(struct drm_device *dev, int ch)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct nvd0_display *disp = nvd0_display(dev);
 	u32 *push = evo_wait(dev, ch, 8);
 	if (push) {
@@ -235,7 +243,7 @@ evo_sync(struct drm_device *dev, int ch)
 		evo_data(push, 0x00000000);
 		evo_data(push, 0x00000000);
 		evo_kick(push, dev, ch);
-		if (nv_wait_cb(dev, evo_sync_wait, disp->sync))
+		if (nv_wait_cb(device, evo_sync_wait, disp->sync))
 			return 0;
 	}
 
@@ -300,7 +308,7 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		return ret;
 
 
-	offset  = nvc0_software_crtc(chan, nv_crtc->index);
+	offset  = nvc0_fence_crtc(chan, nv_crtc->index);
 	offset += evo->sem.offset;
 
 	BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
@@ -363,7 +371,7 @@ nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 static int
 nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
 {
-	struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(nv_crtc->base.dev);
 	struct drm_device *dev = nv_crtc->base.dev;
 	struct nouveau_connector *nv_connector;
 	struct drm_connector *connector;
@@ -386,7 +394,7 @@ nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
 		mode |= nv_connector->dithering_depth;
 	}
 
-	if (dev_priv->card_type < NV_E0)
+	if (nv_device(drm->device)->card_type < NV_E0)
 		mthd = 0x0490 + (nv_crtc->index * 0x0300);
 	else
 		mthd = 0x04a0 + (nv_crtc->index * 0x0300);
@@ -701,11 +709,12 @@ static int
 nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 			struct drm_framebuffer *old_fb)
 {
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	int ret;
 
 	if (!crtc->fb) {
-		NV_DEBUG_KMS(crtc->dev, "No FB bound\n");
+		NV_DEBUG(drm, "No FB bound\n");
 		return 0;
 	}
 
@@ -923,6 +932,7 @@ nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 	int or = nv_encoder->or;
 	u32 dpms_ctrl;
 
@@ -932,9 +942,9 @@ nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
 	if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
 		dpms_ctrl |= 0x00000004;
 
-	nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
-	nv_mask(dev, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
-	nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
+	nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
+	nv_mask(device, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
+	nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
 }
 
 static bool
@@ -1025,18 +1035,19 @@ nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 	enum drm_connector_status status = connector_status_disconnected;
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 	int or = nv_encoder->or;
 	u32 load;
 
-	nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00100000);
+	nv_wr32(device, 0x61a00c + (or * 0x800), 0x00100000);
 	udelay(9500);
-	nv_wr32(dev, 0x61a00c + (or * 0x800), 0x80000000);
+	nv_wr32(device, 0x61a00c + (or * 0x800), 0x80000000);
 
-	load = nv_rd32(dev, 0x61a00c + (or * 0x800));
+	load = nv_rd32(device, 0x61a00c + (or * 0x800));
 	if ((load & 0x38000000) == 0x38000000)
 		status = connector_status_connected;
 
-	nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00000000);
+	nv_wr32(device, 0x61a00c + (or * 0x800), 0x00000000);
 	return status;
 }
 
@@ -1063,7 +1074,7 @@ static const struct drm_encoder_funcs nvd0_dac_func = {
 };
 
 static int
-nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe)
+nvd0_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
 	struct drm_device *dev = connector->dev;
 	struct nouveau_encoder *nv_encoder;
@@ -1094,24 +1105,25 @@ nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_connector *nv_connector;
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 	int i, or = nv_encoder->or * 0x30;
 
 	nv_connector = nouveau_encoder_connector_get(nv_encoder);
 	if (!drm_detect_monitor_audio(nv_connector->edid))
 		return;
 
-	nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000001);
+	nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000001);
 
 	drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
 	if (nv_connector->base.eld[0]) {
 		u8 *eld = nv_connector->base.eld;
 
 		for (i = 0; i < eld[2] * 4; i++)
-			nv_wr32(dev, 0x10ec00 + or, (i << 8) | eld[i]);
+			nv_wr32(device, 0x10ec00 + or, (i << 8) | eld[i]);
 		for (i = eld[2] * 4; i < 0x60; i++)
-			nv_wr32(dev, 0x10ec00 + or, (i << 8) | 0x00);
+			nv_wr32(device, 0x10ec00 + or, (i << 8) | 0x00);
 
-		nv_mask(dev, 0x10ec10 + or, 0x80000002, 0x80000002);
+		nv_mask(device, 0x10ec10 + or, 0x80000002, 0x80000002);
 	}
 }
 
@@ -1120,9 +1132,10 @@ nvd0_audio_disconnect(struct drm_encoder *encoder)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 	int or = nv_encoder->or * 0x30;
 
-	nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000000);
+	nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000000);
 }
 
 /******************************************************************************
@@ -1135,6 +1148,7 @@ nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nouveau_connector *nv_connector;
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 	int head = nv_crtc->index * 0x800;
 	u32 rekey = 56; /* binary driver, and tegra constant */
 	u32 max_ac_packet;
@@ -1149,25 +1163,25 @@ nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
 	max_ac_packet /= 32;
 
 	/* AVI InfoFrame */
-	nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
-	nv_wr32(dev, 0x61671c + head, 0x000d0282);
-	nv_wr32(dev, 0x616720 + head, 0x0000006f);
-	nv_wr32(dev, 0x616724 + head, 0x00000000);
-	nv_wr32(dev, 0x616728 + head, 0x00000000);
-	nv_wr32(dev, 0x61672c + head, 0x00000000);
-	nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000001);
+	nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
+	nv_wr32(device, 0x61671c + head, 0x000d0282);
+	nv_wr32(device, 0x616720 + head, 0x0000006f);
+	nv_wr32(device, 0x616724 + head, 0x00000000);
+	nv_wr32(device, 0x616728 + head, 0x00000000);
+	nv_wr32(device, 0x61672c + head, 0x00000000);
+	nv_mask(device, 0x616714 + head, 0x00000001, 0x00000001);
 
 	/* ??? InfoFrame? */
-	nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
-	nv_wr32(dev, 0x6167ac + head, 0x00000010);
-	nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000001);
+	nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
+	nv_wr32(device, 0x6167ac + head, 0x00000010);
+	nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000001);
 
 	/* HDMI_CTRL */
-	nv_mask(dev, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
+	nv_mask(device, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
 			  max_ac_packet << 16);
 
 	/* NFI, audio doesn't work without it though.. */
-	nv_mask(dev, 0x616548 + head, 0x00000070, 0x00000000);
+	nv_mask(device, 0x616548 + head, 0x00000070, 0x00000000);
 
 	nvd0_audio_mode_set(encoder, mode);
 }
@@ -1178,37 +1192,41 @@ nvd0_hdmi_disconnect(struct drm_encoder *encoder)
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 	int head = nv_crtc->index * 0x800;
 
 	nvd0_audio_disconnect(encoder);
 
-	nv_mask(dev, 0x616798 + head, 0x40000000, 0x00000000);
-	nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
-	nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
+	nv_mask(device, 0x616798 + head, 0x40000000, 0x00000000);
+	nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
+	nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
 }
 
 /******************************************************************************
  * SOR
  *****************************************************************************/
 static inline u32
-nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane)
+nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
 {
 	static const u8 nvd0[] = { 16, 8, 0, 24 };
 	return nvd0[lane];
 }
 
 static void
-nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern)
+nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
 	const u32 loff = (or * 0x800) + (link * 0x80);
-	nv_mask(dev, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+	nv_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
 }
 
 static void
-nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
+nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
 		      u8 lane, u8 swing, u8 preem)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
 	const u32 loff = (or * 0x800) + (link * 0x80);
 	u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
@@ -1236,25 +1254,26 @@ nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
 	}
 
 	if (!config) {
-		NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
+		NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
 		return;
 	}
 
-	nv_mask(dev, 0x61c118 + loff, mask, config[1] << shift);
-	nv_mask(dev, 0x61c120 + loff, mask, config[2] << shift);
-	nv_mask(dev, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
-	nv_mask(dev, 0x61c13c + loff, 0x00000000, 0x00000000);
+	nv_mask(device, 0x61c118 + loff, mask, config[1] << shift);
+	nv_mask(device, 0x61c120 + loff, mask, config[2] << shift);
+	nv_mask(device, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
+	nv_mask(device, 0x61c13c + loff, 0x00000000, 0x00000000);
 }
 
 static void
-nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
+nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
 		     int link_nr, u32 link_bw, bool enhframe)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
 	const u32 loff = (or * 0x800) + (link * 0x80);
 	const u32 soff = (or * 0x800);
-	u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & ~0x001f4000;
-	u32 clksor = nv_rd32(dev, 0x612300 + soff) & ~0x007c0000;
+	u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & ~0x001f4000;
+	u32 clksor = nv_rd32(device, 0x612300 + soff) & ~0x007c0000;
 	u32 script = 0x0000, lane_mask = 0;
 	u8 *table, *entry;
 	int i;
@@ -1284,20 +1303,21 @@ nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
 	for (i = 0; i < link_nr; i++)
 		lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);
 
-	nv_wr32(dev, 0x612300 + soff, clksor);
-	nv_wr32(dev, 0x61c10c + loff, dpctrl);
-	nv_mask(dev, 0x61c130 + loff, 0x0000000f, lane_mask);
+	nv_wr32(device, 0x612300 + soff, clksor);
+	nv_wr32(device, 0x61c10c + loff, dpctrl);
+	nv_mask(device, 0x61c130 + loff, 0x0000000f, lane_mask);
 }
 
 static void
-nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_entry *dcb,
+nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_output *dcb,
 		     u32 *link_nr, u32 *link_bw)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
 	const u32 loff = (or * 0x800) + (link * 0x80);
 	const u32 soff = (or * 0x800);
-	u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & 0x000f0000;
-	u32 clksor = nv_rd32(dev, 0x612300 + soff);
+	u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & 0x000f0000;
+	u32 clksor = nv_rd32(device, 0x612300 + soff);
 
 	if (dpctrl > 0x00030000) *link_nr = 4;
 	else if (dpctrl > 0x00010000) *link_nr = 2;
@@ -1308,9 +1328,10 @@ nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_entry *dcb,
 }
 
 static void
-nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_entry *dcb,
+nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_output *dcb,
 		    u32 crtc, u32 datarate)
 {
+	struct nouveau_device *device = nouveau_dev(dev);
 	const u32 symbol = 100000;
 	const u32 TU = 64;
 	u32 link_nr, link_bw;
@@ -1330,7 +1351,7 @@ nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_entry *dcb,
 	value += 5;
 	value |= 0x08000000;
 
-	nv_wr32(dev, 0x616610 + (crtc * 0x800), value);
+	nv_wr32(device, 0x616610 + (crtc * 0x800), value);
 }
 
 static void
@@ -1338,6 +1359,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct drm_device *dev = encoder->dev;
+	struct nouveau_device *device = nouveau_dev(dev);
 	struct drm_encoder *partner;
 	int or = nv_encoder->or;
 	u32 dpms_ctrl;
@@ -1361,12 +1383,12 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
 	dpms_ctrl = (mode == DRM_MODE_DPMS_ON);
 	dpms_ctrl |= 0x80000000;
 
-	nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
-	nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
-	nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
-	nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
+	nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
+	nv_mask(device, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
+	nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
+	nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
 
-	if (nv_encoder->dcb->type == OUTPUT_DP) {
+	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
 		struct dp_train_func func = {
 			.link_set = nvd0_sor_dp_link_set,
 			.train_set = nvd0_sor_dp_train_set,
@@ -1427,7 +1449,7 @@ static void
 nvd0_sor_prepare(struct drm_encoder *encoder)
 {
 	nvd0_sor_disconnect(encoder);
-	if (nouveau_encoder(encoder)->dcb->type == OUTPUT_DP)
+	if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
 		evo_sync(encoder->dev, EVO_MASTER);
 }
 
@@ -1441,11 +1463,11 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 		  struct drm_display_mode *mode)
 {
 	struct drm_device *dev = encoder->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
 	struct nouveau_connector *nv_connector;
-	struct nvbios *bios = &dev_priv->vbios;
+	struct nvbios *bios = &drm->vbios;
 	u32 mode_ctrl = (1 << nv_crtc->index);
 	u32 syncs, magic, *push;
 	u32 or_config;
@@ -1462,7 +1484,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 
 	nv_connector = nouveau_encoder_connector_get(nv_encoder);
 	switch (nv_encoder->dcb->type) {
-	case OUTPUT_TMDS:
+	case DCB_OUTPUT_TMDS:
 		if (nv_encoder->dcb->sorconf.link & 1) {
 			if (mode->clock < 165000)
 				mode_ctrl |= 0x00000100;
@@ -1478,7 +1500,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 
 		nvd0_hdmi_mode_set(encoder, mode);
 		break;
-	case OUTPUT_LVDS:
+	case DCB_OUTPUT_LVDS:
 		or_config = (mode_ctrl & 0x00000f00) >> 8;
 		if (bios->fp_no_ddc) {
 			if (bios->fp.dual_link)
@@ -1507,7 +1529,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 
 		}
 		break;
-	case OUTPUT_DP:
+	case DCB_OUTPUT_DP:
 		if (nv_connector->base.display_info.bpc == 6) {
 			nv_encoder->dp.datarate = mode->clock * 18 / 8;
 			syncs |= 0x00000002 << 6;
@@ -1530,7 +1552,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
 
 	nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
 
-	if (nv_encoder->dcb->type == OUTPUT_DP) {
+	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
 		nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
 				    nv_encoder->dp.datarate);
 	}
@@ -1571,7 +1593,7 @@ static const struct drm_encoder_funcs nvd0_sor_func = {
 };
 
 static int
-nvd0_sor_create(struct drm_connector *connector, struct dcb_entry *dcbe)
+nvd0_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
 	struct drm_device *dev = connector->dev;
 	struct nouveau_encoder *nv_encoder;
@@ -1597,50 +1619,51 @@ nvd0_sor_create(struct drm_connector *connector, struct dcb_entry *dcbe)
 /******************************************************************************
  * IRQ
  *****************************************************************************/
-static struct dcb_entry *
+static struct dcb_output *
 lookup_dcb(struct drm_device *dev, int id, u32 mc)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	int type, or, i, link = -1;
 
 	if (id < 4) {
-		type = OUTPUT_ANALOG;
+		type = DCB_OUTPUT_ANALOG;
 		or   = id;
 	} else {
 		switch (mc & 0x00000f00) {
-		case 0x00000000: link = 0; type = OUTPUT_LVDS; break;
-		case 0x00000100: link = 0; type = OUTPUT_TMDS; break;
-		case 0x00000200: link = 1; type = OUTPUT_TMDS; break;
-		case 0x00000500: link = 0; type = OUTPUT_TMDS; break;
-		case 0x00000800: link = 0; type = OUTPUT_DP; break;
-		case 0x00000900: link = 1; type = OUTPUT_DP; break;
+		case 0x00000000: link = 0; type = DCB_OUTPUT_LVDS; break;
+		case 0x00000100: link = 0; type = DCB_OUTPUT_TMDS; break;
+		case 0x00000200: link = 1; type = DCB_OUTPUT_TMDS; break;
+		case 0x00000500: link = 0; type = DCB_OUTPUT_TMDS; break;
+		case 0x00000800: link = 0; type = DCB_OUTPUT_DP; break;
+		case 0x00000900: link = 1; type = DCB_OUTPUT_DP; break;
 		default:
-			NV_ERROR(dev, "PDISP: unknown SOR mc 0x%08x\n", mc);
+			NV_ERROR(drm, "PDISP: unknown SOR mc 0x%08x\n", mc);
 			return NULL;
 		}
 
 		or = id - 4;
 	}
 
-	for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
-		struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
+	for (i = 0; i < drm->vbios.dcb.entries; i++) {
+		struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
 		if (dcb->type == type && (dcb->or & (1 << or)) &&
 		    (link < 0 || link == !(dcb->sorconf.link & 1)))
 			return dcb;
 	}
 
-	NV_ERROR(dev, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
+	NV_ERROR(drm, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
 	return NULL;
 }
 
 static void
 nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
1638{ 1660{
1639 struct dcb_entry *dcb; 1661 struct nouveau_device *device = nouveau_dev(dev);
1662 struct dcb_output *dcb;
1640 int i; 1663 int i;
1641 1664
1642 for (i = 0; mask && i < 8; i++) { 1665 for (i = 0; mask && i < 8; i++) {
1643 u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20)); 1666 u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
1644 if (!(mcc & (1 << crtc))) 1667 if (!(mcc & (1 << crtc)))
1645 continue; 1668 continue;
1646 1669
@@ -1651,20 +1674,22 @@ nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
1651 nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc); 1674 nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
1652 } 1675 }
1653 1676
1654 nv_wr32(dev, 0x6101d4, 0x00000000); 1677 nv_wr32(device, 0x6101d4, 0x00000000);
1655 nv_wr32(dev, 0x6109d4, 0x00000000); 1678 nv_wr32(device, 0x6109d4, 0x00000000);
1656 nv_wr32(dev, 0x6101d0, 0x80000000); 1679 nv_wr32(device, 0x6101d0, 0x80000000);
1657} 1680}
1658 1681
1659static void 1682static void
1660nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask) 1683nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
1661{ 1684{
1662 struct dcb_entry *dcb; 1685 struct nouveau_device *device = nouveau_dev(dev);
1686 struct nouveau_drm *drm = nouveau_drm(dev);
1687 struct dcb_output *dcb;
1663 u32 or, tmp, pclk; 1688 u32 or, tmp, pclk;
1664 int i; 1689 int i;
1665 1690
1666 for (i = 0; mask && i < 8; i++) { 1691 for (i = 0; mask && i < 8; i++) {
1667 u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20)); 1692 u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
1668 if (!(mcc & (1 << crtc))) 1693 if (!(mcc & (1 << crtc)))
1669 continue; 1694 continue;
1670 1695
@@ -1675,16 +1700,16 @@ nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
1675 nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc); 1700 nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
1676 } 1701 }
1677 1702
1678 pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000; 1703 pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
1679 NV_DEBUG_KMS(dev, "PDISP: crtc %d pclk %d mask 0x%08x\n", 1704 NV_DEBUG(drm, "PDISP: crtc %d pclk %d mask 0x%08x\n",
1680 crtc, pclk, mask); 1705 crtc, pclk, mask);
1681 if (pclk && (mask & 0x00010000)) { 1706 if (pclk && (mask & 0x00010000)) {
1682 nv50_crtc_set_clock(dev, crtc, pclk); 1707 nv50_crtc_set_clock(dev, crtc, pclk);
1683 } 1708 }
1684 1709
1685 for (i = 0; mask && i < 8; i++) { 1710 for (i = 0; mask && i < 8; i++) {
1686 u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20)); 1711 u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
1687 u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20)); 1712 u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
1688 if (!(mcp & (1 << crtc))) 1713 if (!(mcp & (1 << crtc)))
1689 continue; 1714 continue;
1690 1715
@@ -1695,20 +1720,20 @@ nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
1695 1720
1696 nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc); 1721 nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);
1697 1722
1698 nv_wr32(dev, 0x612200 + (crtc * 0x800), 0x00000000); 1723 nv_wr32(device, 0x612200 + (crtc * 0x800), 0x00000000);
1699 switch (dcb->type) { 1724 switch (dcb->type) {
1700 case OUTPUT_ANALOG: 1725 case DCB_OUTPUT_ANALOG:
1701 nv_wr32(dev, 0x612280 + (or * 0x800), 0x00000000); 1726 nv_wr32(device, 0x612280 + (or * 0x800), 0x00000000);
1702 break; 1727 break;
1703 case OUTPUT_TMDS: 1728 case DCB_OUTPUT_TMDS:
1704 case OUTPUT_LVDS: 1729 case DCB_OUTPUT_LVDS:
1705 case OUTPUT_DP: 1730 case DCB_OUTPUT_DP:
1706 if (cfg & 0x00000100) 1731 if (cfg & 0x00000100)
1707 tmp = 0x00000101; 1732 tmp = 0x00000101;
1708 else 1733 else
1709 tmp = 0x00000000; 1734 tmp = 0x00000000;
1710 1735
1711 nv_mask(dev, 0x612300 + (or * 0x800), 0x00000707, tmp); 1736 nv_mask(device, 0x612300 + (or * 0x800), 0x00000707, tmp);
1712 break; 1737 break;
1713 default: 1738 default:
1714 break; 1739 break;
@@ -1717,22 +1742,23 @@ nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
1717 break; 1742 break;
1718 } 1743 }
1719 1744
1720 nv_wr32(dev, 0x6101d4, 0x00000000); 1745 nv_wr32(device, 0x6101d4, 0x00000000);
1721 nv_wr32(dev, 0x6109d4, 0x00000000); 1746 nv_wr32(device, 0x6109d4, 0x00000000);
1722 nv_wr32(dev, 0x6101d0, 0x80000000); 1747 nv_wr32(device, 0x6101d0, 0x80000000);
1723} 1748}
1724 1749
1725static void 1750static void
1726nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask) 1751nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
1727{ 1752{
1728 struct dcb_entry *dcb; 1753 struct nouveau_device *device = nouveau_dev(dev);
1754 struct dcb_output *dcb;
1729 int pclk, i; 1755 int pclk, i;
1730 1756
1731 pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000; 1757 pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
1732 1758
1733 for (i = 0; mask && i < 8; i++) { 1759 for (i = 0; mask && i < 8; i++) {
1734 u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20)); 1760 u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
1735 u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20)); 1761 u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
1736 if (!(mcp & (1 << crtc))) 1762 if (!(mcp & (1 << crtc)))
1737 continue; 1763 continue;
1738 1764
@@ -1743,34 +1769,36 @@ nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
1743 nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc); 1769 nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
1744 } 1770 }
1745 1771
1746 nv_wr32(dev, 0x6101d4, 0x00000000); 1772 nv_wr32(device, 0x6101d4, 0x00000000);
1747 nv_wr32(dev, 0x6109d4, 0x00000000); 1773 nv_wr32(device, 0x6109d4, 0x00000000);
1748 nv_wr32(dev, 0x6101d0, 0x80000000); 1774 nv_wr32(device, 0x6101d0, 0x80000000);
1749} 1775}
1750 1776
1751static void 1777static void
1752nvd0_display_bh(unsigned long data) 1778nvd0_display_bh(unsigned long data)
1753{ 1779{
1754 struct drm_device *dev = (struct drm_device *)data; 1780 struct drm_device *dev = (struct drm_device *)data;
1781 struct nouveau_device *device = nouveau_dev(dev);
1782 struct nouveau_drm *drm = nouveau_drm(dev);
1755 struct nvd0_display *disp = nvd0_display(dev); 1783 struct nvd0_display *disp = nvd0_display(dev);
1756 u32 mask = 0, crtc = ~0; 1784 u32 mask = 0, crtc = ~0;
1757 int i; 1785 int i;
1758 1786
1759 if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) { 1787 if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
1760 NV_INFO(dev, "PDISP: modeset req %d\n", disp->modeset); 1788 NV_INFO(drm, "PDISP: modeset req %d\n", disp->modeset);
1761 NV_INFO(dev, " STAT: 0x%08x 0x%08x 0x%08x\n", 1789 NV_INFO(drm, " STAT: 0x%08x 0x%08x 0x%08x\n",
1762 nv_rd32(dev, 0x6101d0), 1790 nv_rd32(device, 0x6101d0),
1763 nv_rd32(dev, 0x6101d4), nv_rd32(dev, 0x6109d4)); 1791 nv_rd32(device, 0x6101d4), nv_rd32(device, 0x6109d4));
1764 for (i = 0; i < 8; i++) { 1792 for (i = 0; i < 8; i++) {
1765 NV_INFO(dev, " %s%d: 0x%08x 0x%08x\n", 1793 NV_INFO(drm, " %s%d: 0x%08x 0x%08x\n",
1766 i < 4 ? "DAC" : "SOR", i, 1794 i < 4 ? "DAC" : "SOR", i,
1767 nv_rd32(dev, 0x640180 + (i * 0x20)), 1795 nv_rd32(device, 0x640180 + (i * 0x20)),
1768 nv_rd32(dev, 0x660180 + (i * 0x20))); 1796 nv_rd32(device, 0x660180 + (i * 0x20)));
1769 } 1797 }
1770 } 1798 }
1771 1799
1772 while (!mask && ++crtc < dev->mode_config.num_crtc) 1800 while (!mask && ++crtc < dev->mode_config.num_crtc)
1773 mask = nv_rd32(dev, 0x6101d4 + (crtc * 0x800)); 1801 mask = nv_rd32(device, 0x6101d4 + (crtc * 0x800));
1774 1802
1775 if (disp->modeset & 0x00000001) 1803 if (disp->modeset & 0x00000001)
1776 nvd0_display_unk1_handler(dev, crtc, mask); 1804 nvd0_display_unk1_handler(dev, crtc, mask);
@@ -1780,67 +1808,60 @@ nvd0_display_bh(unsigned long data)
1780 nvd0_display_unk4_handler(dev, crtc, mask); 1808 nvd0_display_unk4_handler(dev, crtc, mask);
1781} 1809}
1782 1810
1783static void 1811void
1784nvd0_display_intr(struct drm_device *dev) 1812nvd0_display_intr(struct drm_device *dev)
1785{ 1813{
1786 struct nvd0_display *disp = nvd0_display(dev); 1814 struct nvd0_display *disp = nvd0_display(dev);
1787 u32 intr = nv_rd32(dev, 0x610088); 1815 struct nouveau_device *device = nouveau_dev(dev);
1788 int i; 1816 struct nouveau_drm *drm = nouveau_drm(dev);
1817 u32 intr = nv_rd32(device, 0x610088);
1789 1818
1790 if (intr & 0x00000001) { 1819 if (intr & 0x00000001) {
1791 u32 stat = nv_rd32(dev, 0x61008c); 1820 u32 stat = nv_rd32(device, 0x61008c);
1792 nv_wr32(dev, 0x61008c, stat); 1821 nv_wr32(device, 0x61008c, stat);
1793 intr &= ~0x00000001; 1822 intr &= ~0x00000001;
1794 } 1823 }
1795 1824
1796 if (intr & 0x00000002) { 1825 if (intr & 0x00000002) {
1797 u32 stat = nv_rd32(dev, 0x61009c); 1826 u32 stat = nv_rd32(device, 0x61009c);
1798 int chid = ffs(stat) - 1; 1827 int chid = ffs(stat) - 1;
1799 if (chid >= 0) { 1828 if (chid >= 0) {
1800 u32 mthd = nv_rd32(dev, 0x6101f0 + (chid * 12)); 1829 u32 mthd = nv_rd32(device, 0x6101f0 + (chid * 12));
1801 u32 data = nv_rd32(dev, 0x6101f4 + (chid * 12)); 1830 u32 data = nv_rd32(device, 0x6101f4 + (chid * 12));
1802 u32 unkn = nv_rd32(dev, 0x6101f8 + (chid * 12)); 1831 u32 unkn = nv_rd32(device, 0x6101f8 + (chid * 12));
1803 1832
1804 NV_INFO(dev, "EvoCh: chid %d mthd 0x%04x data 0x%08x " 1833 NV_INFO(drm, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
1805 "0x%08x 0x%08x\n", 1834 "0x%08x 0x%08x\n",
1806 chid, (mthd & 0x0000ffc), data, mthd, unkn); 1835 chid, (mthd & 0x0000ffc), data, mthd, unkn);
1807 nv_wr32(dev, 0x61009c, (1 << chid)); 1836 nv_wr32(device, 0x61009c, (1 << chid));
1808 nv_wr32(dev, 0x6101f0 + (chid * 12), 0x90000000); 1837 nv_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
1809 } 1838 }
1810 1839
1811 intr &= ~0x00000002; 1840 intr &= ~0x00000002;
1812 } 1841 }
1813 1842
1814 if (intr & 0x00100000) { 1843 if (intr & 0x00100000) {
1815 u32 stat = nv_rd32(dev, 0x6100ac); 1844 u32 stat = nv_rd32(device, 0x6100ac);
1816 1845
1817 if (stat & 0x00000007) { 1846 if (stat & 0x00000007) {
1818 disp->modeset = stat; 1847 disp->modeset = stat;
1819 tasklet_schedule(&disp->tasklet); 1848 tasklet_schedule(&disp->tasklet);
1820 1849
1821 nv_wr32(dev, 0x6100ac, (stat & 0x00000007)); 1850 nv_wr32(device, 0x6100ac, (stat & 0x00000007));
1822 stat &= ~0x00000007; 1851 stat &= ~0x00000007;
1823 } 1852 }
1824 1853
1825 if (stat) { 1854 if (stat) {
1826 NV_INFO(dev, "PDISP: unknown intr24 0x%08x\n", stat); 1855 NV_INFO(drm, "PDISP: unknown intr24 0x%08x\n", stat);
1827 nv_wr32(dev, 0x6100ac, stat); 1856 nv_wr32(device, 0x6100ac, stat);
1828 } 1857 }
1829 1858
1830 intr &= ~0x00100000; 1859 intr &= ~0x00100000;
1831 } 1860 }
1832 1861
1833 for (i = 0; i < dev->mode_config.num_crtc; i++) { 1862 intr &= ~0x0f000000; /* vblank, handled in core */
1834 u32 mask = 0x01000000 << i;
1835 if (intr & mask) {
1836 u32 stat = nv_rd32(dev, 0x6100bc + (i * 0x800));
1837 nv_wr32(dev, 0x6100bc + (i * 0x800), stat);
1838 intr &= ~mask;
1839 }
1840 }
1841
1842 if (intr) 1863 if (intr)
1843 NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr); 1864 NV_INFO(drm, "PDISP: unknown intr 0x%08x\n", intr);
1844} 1865}
1845 1866
1846/****************************************************************************** 1867/******************************************************************************
@@ -1867,15 +1888,17 @@ int
1867nvd0_display_init(struct drm_device *dev) 1888nvd0_display_init(struct drm_device *dev)
1868{ 1889{
1869 struct nvd0_display *disp = nvd0_display(dev); 1890 struct nvd0_display *disp = nvd0_display(dev);
1891 struct nouveau_device *device = nouveau_dev(dev);
1892 struct nouveau_drm *drm = nouveau_drm(dev);
1870 int ret, i; 1893 int ret, i;
1871 u32 *push; 1894 u32 *push;
1872 1895
1873 if (nv_rd32(dev, 0x6100ac) & 0x00000100) { 1896 if (nv_rd32(device, 0x6100ac) & 0x00000100) {
1874 nv_wr32(dev, 0x6100ac, 0x00000100); 1897 nv_wr32(device, 0x6100ac, 0x00000100);
1875 nv_mask(dev, 0x6194e8, 0x00000001, 0x00000000); 1898 nv_mask(device, 0x6194e8, 0x00000001, 0x00000000);
1876 if (!nv_wait(dev, 0x6194e8, 0x00000002, 0x00000000)) { 1899 if (!nv_wait(device, 0x6194e8, 0x00000002, 0x00000000)) {
1877 NV_ERROR(dev, "PDISP: 0x6194e8 0x%08x\n", 1900 NV_ERROR(drm, "PDISP: 0x6194e8 0x%08x\n",
1878 nv_rd32(dev, 0x6194e8)); 1901 nv_rd32(device, 0x6194e8));
1879 return -EBUSY; 1902 return -EBUSY;
1880 } 1903 }
1881 } 1904 }
@@ -1884,27 +1907,27 @@ nvd0_display_init(struct drm_device *dev)
1884 * work at all unless you do the SOR part below. 1907 * work at all unless you do the SOR part below.
1885 */ 1908 */
1886 for (i = 0; i < 3; i++) { 1909 for (i = 0; i < 3; i++) {
1887 u32 dac = nv_rd32(dev, 0x61a000 + (i * 0x800)); 1910 u32 dac = nv_rd32(device, 0x61a000 + (i * 0x800));
1888 nv_wr32(dev, 0x6101c0 + (i * 0x800), dac); 1911 nv_wr32(device, 0x6101c0 + (i * 0x800), dac);
1889 } 1912 }
1890 1913
1891 for (i = 0; i < 4; i++) { 1914 for (i = 0; i < 4; i++) {
1892 u32 sor = nv_rd32(dev, 0x61c000 + (i * 0x800)); 1915 u32 sor = nv_rd32(device, 0x61c000 + (i * 0x800));
1893 nv_wr32(dev, 0x6301c4 + (i * 0x800), sor); 1916 nv_wr32(device, 0x6301c4 + (i * 0x800), sor);
1894 } 1917 }
1895 1918
1896 for (i = 0; i < dev->mode_config.num_crtc; i++) { 1919 for (i = 0; i < dev->mode_config.num_crtc; i++) {
1897 u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800)); 1920 u32 crtc0 = nv_rd32(device, 0x616104 + (i * 0x800));
1898 u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800)); 1921 u32 crtc1 = nv_rd32(device, 0x616108 + (i * 0x800));
1899 u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800)); 1922 u32 crtc2 = nv_rd32(device, 0x61610c + (i * 0x800));
1900 nv_wr32(dev, 0x6101b4 + (i * 0x800), crtc0); 1923 nv_wr32(device, 0x6101b4 + (i * 0x800), crtc0);
1901 nv_wr32(dev, 0x6101b8 + (i * 0x800), crtc1); 1924 nv_wr32(device, 0x6101b8 + (i * 0x800), crtc1);
1902 nv_wr32(dev, 0x6101bc + (i * 0x800), crtc2); 1925 nv_wr32(device, 0x6101bc + (i * 0x800), crtc2);
1903 } 1926 }
1904 1927
1905 /* point at our hash table / objects, enable interrupts */ 1928 /* point at our hash table / objects, enable interrupts */
1906 nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9); 1929 nv_wr32(device, 0x610010, (disp->mem->addr >> 8) | 9);
1907 nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307); 1930 nv_mask(device, 0x6100b0, 0x00000307, 0x00000307);
1908 1931
1909 /* init master */ 1932 /* init master */
1910 ret = evo_init_dma(dev, EVO_MASTER); 1933 ret = evo_init_dma(dev, EVO_MASTER);
@@ -1944,7 +1967,6 @@ error:
1944void 1967void
1945nvd0_display_destroy(struct drm_device *dev) 1968nvd0_display_destroy(struct drm_device *dev)
1946{ 1969{
1947 struct drm_nouveau_private *dev_priv = dev->dev_private;
1948 struct nvd0_display *disp = nvd0_display(dev); 1970 struct nvd0_display *disp = nvd0_display(dev);
1949 struct pci_dev *pdev = dev->pdev; 1971 struct pci_dev *pdev = dev->pdev;
1950 int i; 1972 int i;
@@ -1957,31 +1979,36 @@ nvd0_display_destroy(struct drm_device *dev)
1957 nouveau_gpuobj_ref(NULL, &disp->mem); 1979 nouveau_gpuobj_ref(NULL, &disp->mem);
1958 nouveau_bo_unmap(disp->sync); 1980 nouveau_bo_unmap(disp->sync);
1959 nouveau_bo_ref(NULL, &disp->sync); 1981 nouveau_bo_ref(NULL, &disp->sync);
1960 nouveau_irq_unregister(dev, 26);
1961 1982
1962 dev_priv->engine.display.priv = NULL; 1983 nouveau_display(dev)->priv = NULL;
1963 kfree(disp); 1984 kfree(disp);
1964} 1985}
1965 1986
1966int 1987int
1967nvd0_display_create(struct drm_device *dev) 1988nvd0_display_create(struct drm_device *dev)
1968{ 1989{
1969 struct drm_nouveau_private *dev_priv = dev->dev_private; 1990 struct nouveau_device *device = nouveau_dev(dev);
1970 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 1991 struct nouveau_drm *drm = nouveau_drm(dev);
1971 struct dcb_table *dcb = &dev_priv->vbios.dcb; 1992 struct nouveau_bar *bar = nouveau_bar(device);
1993 struct nouveau_fb *pfb = nouveau_fb(device);
1994 struct dcb_table *dcb = &drm->vbios.dcb;
1972 struct drm_connector *connector, *tmp; 1995 struct drm_connector *connector, *tmp;
1973 struct pci_dev *pdev = dev->pdev; 1996 struct pci_dev *pdev = dev->pdev;
1974 struct nvd0_display *disp; 1997 struct nvd0_display *disp;
1975 struct dcb_entry *dcbe; 1998 struct dcb_output *dcbe;
1976 int crtcs, ret, i; 1999 int crtcs, ret, i;
1977 2000
1978 disp = kzalloc(sizeof(*disp), GFP_KERNEL); 2001 disp = kzalloc(sizeof(*disp), GFP_KERNEL);
1979 if (!disp) 2002 if (!disp)
1980 return -ENOMEM; 2003 return -ENOMEM;
1981 dev_priv->engine.display.priv = disp; 2004
2005 nouveau_display(dev)->priv = disp;
2006 nouveau_display(dev)->dtor = nvd0_display_destroy;
2007 nouveau_display(dev)->init = nvd0_display_init;
2008 nouveau_display(dev)->fini = nvd0_display_fini;
1982 2009
1983 /* create crtc objects to represent the hw heads */ 2010 /* create crtc objects to represent the hw heads */
1984 crtcs = nv_rd32(dev, 0x022448); 2011 crtcs = nv_rd32(device, 0x022448);
1985 for (i = 0; i < crtcs; i++) { 2012 for (i = 0; i < crtcs; i++) {
1986 ret = nvd0_crtc_create(dev, i); 2013 ret = nvd0_crtc_create(dev, i);
1987 if (ret) 2014 if (ret)
@@ -1995,22 +2022,22 @@ nvd0_display_create(struct drm_device *dev)
1995 continue; 2022 continue;
1996 2023
1997 if (dcbe->location != DCB_LOC_ON_CHIP) { 2024 if (dcbe->location != DCB_LOC_ON_CHIP) {
1998 NV_WARN(dev, "skipping off-chip encoder %d/%d\n", 2025 NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
1999 dcbe->type, ffs(dcbe->or) - 1); 2026 dcbe->type, ffs(dcbe->or) - 1);
2000 continue; 2027 continue;
2001 } 2028 }
2002 2029
2003 switch (dcbe->type) { 2030 switch (dcbe->type) {
2004 case OUTPUT_TMDS: 2031 case DCB_OUTPUT_TMDS:
2005 case OUTPUT_LVDS: 2032 case DCB_OUTPUT_LVDS:
2006 case OUTPUT_DP: 2033 case DCB_OUTPUT_DP:
2007 nvd0_sor_create(connector, dcbe); 2034 nvd0_sor_create(connector, dcbe);
2008 break; 2035 break;
2009 case OUTPUT_ANALOG: 2036 case DCB_OUTPUT_ANALOG:
2010 nvd0_dac_create(connector, dcbe); 2037 nvd0_dac_create(connector, dcbe);
2011 break; 2038 break;
2012 default: 2039 default:
2013 NV_WARN(dev, "skipping unsupported encoder %d/%d\n", 2040 NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
2014 dcbe->type, ffs(dcbe->or) - 1); 2041 dcbe->type, ffs(dcbe->or) - 1);
2015 continue; 2042 continue;
2016 } 2043 }
@@ -2021,14 +2048,13 @@ nvd0_display_create(struct drm_device *dev)
2021 if (connector->encoder_ids[0]) 2048 if (connector->encoder_ids[0])
2022 continue; 2049 continue;
2023 2050
2024 NV_WARN(dev, "%s has no encoders, removing\n", 2051 NV_WARN(drm, "%s has no encoders, removing\n",
2025 drm_get_connector_name(connector)); 2052 drm_get_connector_name(connector));
2026 connector->funcs->destroy(connector); 2053 connector->funcs->destroy(connector);
2027 } 2054 }
2028 2055
2029 /* setup interrupt handling */ 2056 /* setup interrupt handling */
2030 tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev); 2057 tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
2031 nouveau_irq_register(dev, 26, nvd0_display_intr);
2032 2058
2033 /* small shared memory area we use for notifiers and semaphores */ 2059 /* small shared memory area we use for notifiers and semaphores */
2034 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, 2060 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2045,7 +2071,7 @@ nvd0_display_create(struct drm_device *dev)
2045 goto out; 2071 goto out;
2046 2072
2047 /* hash table and dma objects for the memory areas we care about */ 2073 /* hash table and dma objects for the memory areas we care about */
2048 ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000, 2074 ret = nouveau_gpuobj_new(nv_object(device), NULL, 0x4000, 0x10000,
2049 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem); 2075 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
2050 if (ret) 2076 if (ret)
2051 goto out; 2077 goto out;
@@ -2077,7 +2103,7 @@ nvd0_display_create(struct drm_device *dev)
2077 2103
2078 nv_wo32(disp->mem, dmao + 0x20, 0x00000049); 2104 nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
2079 nv_wo32(disp->mem, dmao + 0x24, 0x00000000); 2105 nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
2080 nv_wo32(disp->mem, dmao + 0x28, (dev_priv->vram_size - 1) >> 8); 2106 nv_wo32(disp->mem, dmao + 0x28, (pfb->ram.size - 1) >> 8);
2081 nv_wo32(disp->mem, dmao + 0x2c, 0x00000000); 2107 nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
2082 nv_wo32(disp->mem, dmao + 0x30, 0x00000000); 2108 nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
2083 nv_wo32(disp->mem, dmao + 0x34, 0x00000000); 2109 nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
@@ -2087,7 +2113,7 @@ nvd0_display_create(struct drm_device *dev)
2087 2113
2088 nv_wo32(disp->mem, dmao + 0x40, 0x00000009); 2114 nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
2089 nv_wo32(disp->mem, dmao + 0x44, 0x00000000); 2115 nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
2090 nv_wo32(disp->mem, dmao + 0x48, (dev_priv->vram_size - 1) >> 8); 2116 nv_wo32(disp->mem, dmao + 0x48, (pfb->ram.size - 1) >> 8);
2091 nv_wo32(disp->mem, dmao + 0x4c, 0x00000000); 2117 nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
2092 nv_wo32(disp->mem, dmao + 0x50, 0x00000000); 2118 nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
2093 nv_wo32(disp->mem, dmao + 0x54, 0x00000000); 2119 nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
@@ -2097,7 +2123,7 @@ nvd0_display_create(struct drm_device *dev)
2097 2123
2098 nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009); 2124 nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
2099 nv_wo32(disp->mem, dmao + 0x64, 0x00000000); 2125 nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
2100 nv_wo32(disp->mem, dmao + 0x68, (dev_priv->vram_size - 1) >> 8); 2126 nv_wo32(disp->mem, dmao + 0x68, (pfb->ram.size - 1) >> 8);
2101 nv_wo32(disp->mem, dmao + 0x6c, 0x00000000); 2127 nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
2102 nv_wo32(disp->mem, dmao + 0x70, 0x00000000); 2128 nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
2103 nv_wo32(disp->mem, dmao + 0x74, 0x00000000); 2129 nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
@@ -2106,7 +2132,7 @@ nvd0_display_create(struct drm_device *dev)
2106 ((dmao + 0x60) << 9)); 2132 ((dmao + 0x60) << 9));
2107 } 2133 }
2108 2134
2109 pinstmem->flush(dev); 2135 bar->flush(bar);
2110 2136
2111out: 2137out:
2112 if (ret) 2138 if (ret)
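
The hunks above show the two mechanical conversions this series applies to nvd0_display.c: the DCB rename (OUTPUT_* becomes DCB_OUTPUT_*, struct dcb_entry becomes struct dcb_output) and the retirement of drm_nouveau_private in favour of per-role handles. A minimal sketch of the second pattern, assuming only the nouveau_dev()/nouveau_drm() accessors already used in the hunks:

/* sketch_read_and_log() is illustrative only, not part of the commit */
static void sketch_read_and_log(struct drm_device *dev)
{
	struct nouveau_device *device = nouveau_dev(dev); /* register I/O handle */
	struct nouveau_drm *drm = nouveau_drm(dev);       /* logging/vbios handle */
	u32 stat;

	stat = nv_rd32(device, 0x6100ac);        /* was: nv_rd32(dev, ...) */
	NV_ERROR(drm, "PDISP: 0x%08x\n", stat);  /* was: NV_ERROR(dev, ...) */
}
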
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
deleted file mode 100644
index 0eba15b2201a..000000000000
--- a/drivers/gpu/drm/nouveau/nve0_fifo.c
+++ /dev/null
@@ -1,453 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26
27#include "nouveau_drv.h"
28#include "nouveau_mm.h"
29#include "nouveau_fifo.h"
30
31#define NVE0_FIFO_ENGINE_NUM 32
32
33static void nve0_fifo_isr(struct drm_device *);
34
35struct nve0_fifo_engine {
36 struct nouveau_gpuobj *playlist[2];
37 int cur_playlist;
38};
39
40struct nve0_fifo_priv {
41 struct nouveau_fifo_priv base;
42 struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
43 struct {
44 struct nouveau_gpuobj *mem;
45 struct nouveau_vma bar;
46 } user;
47 int spoon_nr;
48};
49
50struct nve0_fifo_chan {
51 struct nouveau_fifo_chan base;
52 u32 engine;
53};
54
55static void
56nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
57{
58 struct drm_nouveau_private *dev_priv = dev->dev_private;
59 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
60 struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
61 struct nve0_fifo_engine *peng = &priv->engine[engine];
62 struct nouveau_gpuobj *cur;
63 u32 match = (engine << 16) | 0x00000001;
64 int ret, i, p;
65
66 cur = peng->playlist[peng->cur_playlist];
67 if (unlikely(cur == NULL)) {
68 ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
69 if (ret) {
70 NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
71 return;
72 }
73
74 peng->playlist[peng->cur_playlist] = cur;
75 }
76
77 peng->cur_playlist = !peng->cur_playlist;
78
79 for (i = 0, p = 0; i < priv->base.channels; i++) {
80 u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
81 if (ctrl != match)
82 continue;
83 nv_wo32(cur, p + 0, i);
84 nv_wo32(cur, p + 4, 0x00000000);
85 p += 8;
86 }
87 pinstmem->flush(dev);
88
89 nv_wr32(dev, 0x002270, cur->vinst >> 12);
90 nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
91 if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
92 NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
93}
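/*
 * Reader's note (added commentary, not in the original file): the update
 * routine above keeps two playlist buffers per engine.  It picks
 * playlist[cur_playlist], toggles cur_playlist so the next update uses
 * the other buffer, fills the picked buffer with one (chid, 0) pair per
 * channel whose 0x800004 control word matches this engine, then submits
 * it through 0x002270/0x002274 and polls 0x002284 for completion.
 */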
94
95static int
96nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
97{
98 struct drm_device *dev = chan->dev;
99 struct drm_nouveau_private *dev_priv = dev->dev_private;
100 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
101 struct nve0_fifo_priv *priv = nv_engine(dev, engine);
102 struct nve0_fifo_chan *fctx;
103 u64 usermem = priv->user.mem->vinst + chan->id * 512;
104 u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
105 int ret = 0, i;
106
107 fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
108 if (!fctx)
109 return -ENOMEM;
110
111 fctx->engine = 0; /* PGRAPH */
112
113 /* allocate vram for control regs, map into polling area */
114 chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
115 priv->user.bar.offset + (chan->id * 512), 512);
116 if (!chan->user) {
117 ret = -ENOMEM;
118 goto error;
119 }
120
121 for (i = 0; i < 0x100; i += 4)
122 nv_wo32(chan->ramin, i, 0x00000000);
123 nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
124 nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
125 nv_wo32(chan->ramin, 0x10, 0x0000face);
126 nv_wo32(chan->ramin, 0x30, 0xfffff902);
127 nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
128 nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
129 upper_32_bits(ib_virt));
130 nv_wo32(chan->ramin, 0x84, 0x20400000);
131 nv_wo32(chan->ramin, 0x94, 0x30000001);
132 nv_wo32(chan->ramin, 0x9c, 0x00000100);
133 nv_wo32(chan->ramin, 0xac, 0x0000001f);
134 nv_wo32(chan->ramin, 0xe4, 0x00000000);
135 nv_wo32(chan->ramin, 0xe8, chan->id);
136 nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
137 nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
138 pinstmem->flush(dev);
139
140 nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
141 (chan->ramin->vinst >> 12));
142 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
143 nve0_fifo_playlist_update(dev, fctx->engine);
144 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
145
146error:
147 if (ret)
148 priv->base.base.context_del(chan, engine);
149 return ret;
150}
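/*
 * Reader's note (added commentary): context creation above has three
 * steps: ioremap the channel's 512-byte control window out of BAR1 into
 * chan->user, fill the instance block (per-channel user area address at
 * 0x08/0x0c, IB base and log2 entry count at 0x48/0x4c), then enable
 * the channel in the 0x800000 context table and rebuild the playlist.
 */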
151
152static void
153nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
154{
155 struct nve0_fifo_chan *fctx = chan->engctx[engine];
156 struct drm_device *dev = chan->dev;
157
158 nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
159 nv_wr32(dev, 0x002634, chan->id);
160 if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
161 NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
162 nve0_fifo_playlist_update(dev, fctx->engine);
163 nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);
164
165 if (chan->user) {
166 iounmap(chan->user);
167 chan->user = NULL;
168 }
169
170 chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
171 kfree(fctx);
172}
173
174static int
175nve0_fifo_init(struct drm_device *dev, int engine)
176{
177 struct drm_nouveau_private *dev_priv = dev->dev_private;
178 struct nve0_fifo_priv *priv = nv_engine(dev, engine);
179 struct nve0_fifo_chan *fctx;
180 int i;
181
182 /* reset PFIFO, enable all available PSUBFIFO areas */
183 nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
184 nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
185 nv_wr32(dev, 0x000204, 0xffffffff);
186
187 priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
188 NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
189
190 /* PSUBFIFO[n] */
191 for (i = 0; i < priv->spoon_nr; i++) {
192 nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
193 nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
194 nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
195 }
196
197 nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
198
199 nv_wr32(dev, 0x002a00, 0xffffffff);
200 nv_wr32(dev, 0x002100, 0xffffffff);
201 nv_wr32(dev, 0x002140, 0xbfffffff);
202
203 /* restore PFIFO context table */
204 for (i = 0; i < priv->base.channels; i++) {
205 struct nouveau_channel *chan = dev_priv->channels.ptr[i];
206 if (!chan || !(fctx = chan->engctx[engine]))
207 continue;
208
209 nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
210 (chan->ramin->vinst >> 12));
211 nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
212 nve0_fifo_playlist_update(dev, fctx->engine);
213 nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
214 }
215
216 return 0;
217}
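/*
 * Reader's note (added commentary): init above appears to double as the
 * resume path.  After resetting PFIFO and the subfifos it walks
 * dev_priv->channels.ptr[] and re-enters every channel that still has a
 * FIFO context into the 0x800000 table and the playlists, which is what
 * the "restore PFIFO context table" loop is doing.
 */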
218
219static int
220nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
221{
222 struct nve0_fifo_priv *priv = nv_engine(dev, engine);
223 int i;
224
225 for (i = 0; i < priv->base.channels; i++) {
226 if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
227 continue;
228
229 nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
230 nv_wr32(dev, 0x002634, i);
231 if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
232 NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
233 i, nv_rd32(dev, 0x002634));
234 return -EBUSY;
235 }
236 }
237
238 nv_wr32(dev, 0x002140, 0x00000000);
239 return 0;
240}
241
242struct nouveau_enum nve0_fifo_fault_unit[] = {
243 {}
244};
245
246struct nouveau_enum nve0_fifo_fault_reason[] = {
247 { 0x00, "PT_NOT_PRESENT" },
248 { 0x01, "PT_TOO_SHORT" },
249 { 0x02, "PAGE_NOT_PRESENT" },
250 { 0x03, "VM_LIMIT_EXCEEDED" },
251 { 0x04, "NO_CHANNEL" },
252 { 0x05, "PAGE_SYSTEM_ONLY" },
253 { 0x06, "PAGE_READ_ONLY" },
254 { 0x0a, "COMPRESSED_SYSRAM" },
255 { 0x0c, "INVALID_STORAGE_TYPE" },
256 {}
257};
258
259struct nouveau_enum nve0_fifo_fault_hubclient[] = {
260 {}
261};
262
263struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
264 {}
265};
266
267struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
268 { 0x00200000, "ILLEGAL_MTHD" },
269 { 0x00800000, "EMPTY_SUBC" },
270 {}
271};
272
273static void
274nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
275{
276 u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
277 u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
278 u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
279 u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
280 u32 client = (stat & 0x00001f00) >> 8;
281
282 NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
283 (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
284 nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
285 printk("] from ");
286 nouveau_enum_print(nve0_fifo_fault_unit, unit);
287 if (stat & 0x00000040) {
288 printk("/");
289 nouveau_enum_print(nve0_fifo_fault_hubclient, client);
290 } else {
291 printk("/GPC%d/", (stat & 0x1f000000) >> 24);
292 nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
293 }
294 printk(" on channel 0x%010llx\n", (u64)inst << 12);
295}
296
297static int
298nve0_fifo_page_flip(struct drm_device *dev, u32 chid)
299{
300 struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
301 struct drm_nouveau_private *dev_priv = dev->dev_private;
302 struct nouveau_channel *chan = NULL;
303 unsigned long flags;
304 int ret = -EINVAL;
305
306 spin_lock_irqsave(&dev_priv->channels.lock, flags);
307 if (likely(chid >= 0 && chid < priv->base.channels)) {
308 chan = dev_priv->channels.ptr[chid];
309 if (likely(chan))
310 ret = nouveau_finish_page_flip(chan, NULL);
311 }
312 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
313 return ret;
314}
315
316static void
317nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
318{
319 u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
320 u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
321 u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
322 u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
323 u32 subc = (addr & 0x00070000);
324 u32 mthd = (addr & 0x00003ffc);
325 u32 show = stat;
326
327 if (stat & 0x00200000) {
328 if (mthd == 0x0054) {
329 if (!nve0_fifo_page_flip(dev, chid))
330 show &= ~0x00200000;
331 }
332 }
333
334 if (show) {
335 NV_INFO(dev, "PFIFO%d:", unit);
336 nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
337 NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
338 unit, chid, subc, mthd, data);
339 }
340
341 nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
342 nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
343}
344
345static void
346nve0_fifo_isr(struct drm_device *dev)
347{
348 u32 mask = nv_rd32(dev, 0x002140);
349 u32 stat = nv_rd32(dev, 0x002100) & mask;
350
351 if (stat & 0x00000100) {
352 NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
353 nv_wr32(dev, 0x002100, 0x00000100);
354 stat &= ~0x00000100;
355 }
356
357 if (stat & 0x10000000) {
358 u32 units = nv_rd32(dev, 0x00259c);
359 u32 u = units;
360
361 while (u) {
362 int i = ffs(u) - 1;
363 nve0_fifo_isr_vm_fault(dev, i);
364 u &= ~(1 << i);
365 }
366
367 nv_wr32(dev, 0x00259c, units);
368 stat &= ~0x10000000;
369 }
370
371 if (stat & 0x20000000) {
372 u32 units = nv_rd32(dev, 0x0025a0);
373 u32 u = units;
374
375 while (u) {
376 int i = ffs(u) - 1;
377 nve0_fifo_isr_subfifo_intr(dev, i);
378 u &= ~(1 << i);
379 }
380
381 nv_wr32(dev, 0x0025a0, units);
382 stat &= ~0x20000000;
383 }
384
385 if (stat & 0x40000000) {
386 NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
387 nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
388 stat &= ~0x40000000;
389 }
390
391 if (stat) {
392 NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
393 nv_wr32(dev, 0x002100, stat);
394 nv_wr32(dev, 0x002140, 0);
395 }
396}
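/*
 * Reader's note (added commentary): the ISR above uses the standard
 * nouveau shape: read the status masked by 0x002140, handle each known
 * bit, clear it in hardware and in the local copy, and if anything is
 * left over, log it and write 0x002140 to zero so an unknown interrupt
 * source cannot storm the CPU.
 */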
397
398static void
399nve0_fifo_destroy(struct drm_device *dev, int engine)
400{
401 struct drm_nouveau_private *dev_priv = dev->dev_private;
402 struct nve0_fifo_priv *priv = nv_engine(dev, engine);
403 int i;
404
405 nouveau_vm_put(&priv->user.bar);
406 nouveau_gpuobj_ref(NULL, &priv->user.mem);
407
408 for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
409 nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
410 nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
411 }
412
413 dev_priv->eng[engine] = NULL;
414 kfree(priv);
415}
416
417int
418nve0_fifo_create(struct drm_device *dev)
419{
420 struct drm_nouveau_private *dev_priv = dev->dev_private;
421 struct nve0_fifo_priv *priv;
422 int ret;
423
424 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
425 if (!priv)
426 return -ENOMEM;
427
428 priv->base.base.destroy = nve0_fifo_destroy;
429 priv->base.base.init = nve0_fifo_init;
430 priv->base.base.fini = nve0_fifo_fini;
431 priv->base.base.context_new = nve0_fifo_context_new;
432 priv->base.base.context_del = nve0_fifo_context_del;
433 priv->base.channels = 4096;
434 dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
435
436 ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
437 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
438 if (ret)
439 goto error;
440
441 ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
442 12, NV_MEM_ACCESS_RW, &priv->user.bar);
443 if (ret)
444 goto error;
445
446 nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);
447
448 nouveau_irq_register(dev, 8, nve0_fifo_isr);
449error:
450 if (ret)
451 priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
452 return ret;
453}
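
The deleted nve0_fifo.c above still registers its handler directly via nouveau_irq_register(dev, 8, nve0_fifo_isr); by contrast, the nvd0_display.c hunks earlier drop the matching nouveau_irq_register(dev, 26, nvd0_display_intr) call, make nvd0_display_intr non-static, and mask off the vblank bits as "handled in core", so the reworked core dispatches into the display code itself. The top-half/bottom-half split survives the rework; a condensed sketch of it, using only names that appear in the hunks:

/* sketch_modeset_irq() is illustrative only, not the literal driver code */
void sketch_modeset_irq(struct drm_device *dev)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nvd0_display *disp = nvd0_display(dev);
	u32 stat = nv_rd32(device, 0x6100ac);

	if (stat & 0x00000007) {                  /* supervisor/modeset request */
		disp->modeset = stat;             /* stashed for the bottom half */
		tasklet_schedule(&disp->tasklet); /* runs nvd0_display_bh() */
		nv_wr32(device, 0x6100ac, stat & 0x00000007); /* ack */
	}
}
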
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.c b/drivers/gpu/drm/nouveau/nve0_graph.c
deleted file mode 100644
index b784a8b32458..000000000000
--- a/drivers/gpu/drm/nouveau/nve0_graph.c
+++ /dev/null
@@ -1,831 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/firmware.h>
26#include <linux/module.h>
27
28#include <drm/drmP.h>
29
30#include "nouveau_drv.h"
31#include "nouveau_mm.h"
32#include "nouveau_fifo.h"
33
34#include "nve0_graph.h"
35
36static void
37nve0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
38{
39 NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
40 nv_rd32(dev, base + 0x400));
41 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
42 nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
43 nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
44 NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
45 nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
46 nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
47}
48
49static void
50nve0_graph_ctxctl_debug(struct drm_device *dev)
51{
52 u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
53 u32 gpc;
54
55 nve0_graph_ctxctl_debug_unit(dev, 0x409000);
56 for (gpc = 0; gpc < gpcnr; gpc++)
57 nve0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
58}
59
60static int
61nve0_graph_load_context(struct nouveau_channel *chan)
62{
63 struct drm_device *dev = chan->dev;
64
65 nv_wr32(dev, 0x409840, 0x00000030);
66 nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
67 nv_wr32(dev, 0x409504, 0x00000003);
68 if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
69 NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
70
71 return 0;
72}
73
74static int
75nve0_graph_unload_context_to(struct drm_device *dev, u64 chan)
76{
77 nv_wr32(dev, 0x409840, 0x00000003);
78 nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
79 nv_wr32(dev, 0x409504, 0x00000009);
80 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
81 NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
82 return -EBUSY;
83 }
84
85 return 0;
86}
87
88static int
89nve0_graph_construct_context(struct nouveau_channel *chan)
90{
91 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
92 struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
93 struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
94 struct drm_device *dev = chan->dev;
95 int ret, i;
96 u32 *ctx;
97
98 ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
99 if (!ctx)
100 return -ENOMEM;
101
102 nve0_graph_load_context(chan);
103
104 nv_wo32(grch->grctx, 0x1c, 1);
105 nv_wo32(grch->grctx, 0x20, 0);
106 nv_wo32(grch->grctx, 0x28, 0);
107 nv_wo32(grch->grctx, 0x2c, 0);
108 dev_priv->engine.instmem.flush(dev);
109
110 ret = nve0_grctx_generate(chan);
111 if (ret)
112 goto err;
113
114 ret = nve0_graph_unload_context_to(dev, chan->ramin->vinst);
115 if (ret)
116 goto err;
117
118 for (i = 0; i < priv->grctx_size; i += 4)
119 ctx[i / 4] = nv_ro32(grch->grctx, i);
120
121 priv->grctx_vals = ctx;
122 return 0;
123
124err:
125 kfree(ctx);
126 return ret;
127}
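/*
 * Reader's note (added commentary): this implements the "golden context"
 * scheme.  The context of the first channel is generated once via
 * nve0_grctx_generate(), unloaded, and snapshotted word-by-word into
 * priv->grctx_vals; nve0_graph_context_new() below seeds every later
 * channel from that snapshot instead of regenerating it.
 */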
128
129static int
130nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
131{
132 struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
133 struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
134 struct drm_device *dev = chan->dev;
135 u32 magic[GPC_MAX][2];
136 u16 offset = 0x0000;
137 int gpc;
138 int ret;
139
140 ret = nouveau_gpuobj_new(dev, chan, 0x3000, 256, NVOBJ_FLAG_VM,
141 &grch->unk408004);
142 if (ret)
143 return ret;
144
145 ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
146 &grch->unk40800c);
147 if (ret)
148 return ret;
149
150 ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
151 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
152 &grch->unk418810);
153 if (ret)
154 return ret;
155
156 ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
157 &grch->mmio);
158 if (ret)
159 return ret;
160
161#define mmio(r,v) do { \
162 nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 0, (r)); \
163 nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 4, (v)); \
164 grch->mmio_nr++; \
165} while (0)
166 mmio(0x40800c, grch->unk40800c->linst >> 8);
167 mmio(0x408010, 0x80000000);
168 mmio(0x419004, grch->unk40800c->linst >> 8);
169 mmio(0x419008, 0x00000000);
170 mmio(0x4064cc, 0x80000000);
171 mmio(0x408004, grch->unk408004->linst >> 8);
172 mmio(0x408008, 0x80000030);
173 mmio(0x418808, grch->unk408004->linst >> 8);
174 mmio(0x41880c, 0x80000030);
175 mmio(0x4064c8, 0x01800600);
176 mmio(0x418810, 0x80000000 | grch->unk418810->linst >> 12);
177 mmio(0x419848, 0x10000000 | grch->unk418810->linst >> 12);
178 mmio(0x405830, 0x02180648);
179 mmio(0x4064c4, 0x0192ffff);
180
181 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
182 u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
183 u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
184 magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
185 magic[gpc][1] = 0x00000000 | (magic1 << 16);
186 offset += 0x0324 * priv->tpc_nr[gpc];
187 }
188
189 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
190 mmio(GPC_UNIT(gpc, 0x30c0), magic[gpc][0]);
191 mmio(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset);
192 offset += 0x07ff * priv->tpc_nr[gpc];
193 }
194
195 mmio(0x17e91c, 0x06060609);
196 mmio(0x17e920, 0x00090a05);
197#undef mmio
198 return 0;
199}
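/*
 * Reader's note (added commentary): the mmio() macro above only appends
 * (register, value) pairs to the grch->mmio object; nothing is written
 * to the hardware here.  nve0_graph_context_new() stores the pair count
 * and list address into the grctx at 0x10/0x14/0x18, and the ctxctl
 * microcode presumably replays the list whenever the channel's context
 * is loaded.
 */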
200
201static int
202nve0_graph_context_new(struct nouveau_channel *chan, int engine)
203{
204 struct drm_device *dev = chan->dev;
205 struct drm_nouveau_private *dev_priv = dev->dev_private;
206 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
207 struct nve0_graph_priv *priv = nv_engine(dev, engine);
208 struct nve0_graph_chan *grch;
209 struct nouveau_gpuobj *grctx;
210 int ret, i;
211
212 grch = kzalloc(sizeof(*grch), GFP_KERNEL);
213 if (!grch)
214 return -ENOMEM;
215 chan->engctx[NVOBJ_ENGINE_GR] = grch;
216
217 ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
218 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
219 &grch->grctx);
220 if (ret)
221 goto error;
222 grctx = grch->grctx;
223
224 ret = nve0_graph_create_context_mmio_list(chan);
225 if (ret)
226 goto error;
227
228 nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
229 nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
230 pinstmem->flush(dev);
231
232 if (!priv->grctx_vals) {
233 ret = nve0_graph_construct_context(chan);
234 if (ret)
235 goto error;
236 }
237
238 for (i = 0; i < priv->grctx_size; i += 4)
239 nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
240 nv_wo32(grctx, 0xf4, 0);
241 nv_wo32(grctx, 0xf8, 0);
242 nv_wo32(grctx, 0x10, grch->mmio_nr);
243 nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
244 nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
245 nv_wo32(grctx, 0x1c, 1);
246 nv_wo32(grctx, 0x20, 0);
247 nv_wo32(grctx, 0x28, 0);
248 nv_wo32(grctx, 0x2c, 0);
249
250 pinstmem->flush(dev);
251 return 0;
252
253error:
254 priv->base.context_del(chan, engine);
255 return ret;
256}
257
258static void
259nve0_graph_context_del(struct nouveau_channel *chan, int engine)
260{
261 struct nve0_graph_chan *grch = chan->engctx[engine];
262
263 nouveau_gpuobj_ref(NULL, &grch->mmio);
264 nouveau_gpuobj_ref(NULL, &grch->unk418810);
265 nouveau_gpuobj_ref(NULL, &grch->unk40800c);
266 nouveau_gpuobj_ref(NULL, &grch->unk408004);
267 nouveau_gpuobj_ref(NULL, &grch->grctx);
268 chan->engctx[engine] = NULL;
269}
270
271static int
272nve0_graph_object_new(struct nouveau_channel *chan, int engine,
273 u32 handle, u16 class)
274{
275 return 0;
276}
277
278static int
279nve0_graph_fini(struct drm_device *dev, int engine, bool suspend)
280{
281 return 0;
282}
283
284static void
285nve0_graph_init_obj418880(struct drm_device *dev)
286{
287 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
288 int i;
289
290 nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
291 nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
292 for (i = 0; i < 4; i++)
293 nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
294 nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
295 nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
296}
297
298static void
299nve0_graph_init_regs(struct drm_device *dev)
300{
301 nv_wr32(dev, 0x400080, 0x003083c2);
302 nv_wr32(dev, 0x400088, 0x0001ffe7);
303 nv_wr32(dev, 0x40008c, 0x00000000);
304 nv_wr32(dev, 0x400090, 0x00000030);
305 nv_wr32(dev, 0x40013c, 0x003901f7);
306 nv_wr32(dev, 0x400140, 0x00000100);
307 nv_wr32(dev, 0x400144, 0x00000000);
308 nv_wr32(dev, 0x400148, 0x00000110);
309 nv_wr32(dev, 0x400138, 0x00000000);
310 nv_wr32(dev, 0x400130, 0x00000000);
311 nv_wr32(dev, 0x400134, 0x00000000);
312 nv_wr32(dev, 0x400124, 0x00000002);
313}
314
315static void
316nve0_graph_init_units(struct drm_device *dev)
317{
318 nv_wr32(dev, 0x409ffc, 0x00000000);
319 nv_wr32(dev, 0x409c14, 0x00003e3e);
320 nv_wr32(dev, 0x409c24, 0x000f0000);
321
322 nv_wr32(dev, 0x404000, 0xc0000000);
323 nv_wr32(dev, 0x404600, 0xc0000000);
324 nv_wr32(dev, 0x408030, 0xc0000000);
325 nv_wr32(dev, 0x404490, 0xc0000000);
326 nv_wr32(dev, 0x406018, 0xc0000000);
327 nv_wr32(dev, 0x407020, 0xc0000000);
328 nv_wr32(dev, 0x405840, 0xc0000000);
329 nv_wr32(dev, 0x405844, 0x00ffffff);
330
331 nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
332 nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
333
334}
335
336static void
337nve0_graph_init_gpc_0(struct drm_device *dev)
338{
339 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
340 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
341 u32 data[TPC_MAX / 8];
342 u8 tpcnr[GPC_MAX];
343 int i, gpc, tpc;
344
345 nv_wr32(dev, GPC_UNIT(0, 0x3018), 0x00000001);
346
347 memset(data, 0x00, sizeof(data));
348 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
349 for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
350 do {
351 gpc = (gpc + 1) % priv->gpc_nr;
352 } while (!tpcnr[gpc]);
353 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
354
355 data[i / 8] |= tpc << ((i % 8) * 4);
356 }
357
358 nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
359 nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
360 nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
361 nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
362
363 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
364 nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
365 priv->tpc_nr[gpc]);
366 nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
367 nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
368 }
369
370 nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
371 nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
372}
373
374static void
375nve0_graph_init_gpc_1(struct drm_device *dev)
376{
377 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
378 int gpc, tpc;
379
380 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
381 nv_wr32(dev, GPC_UNIT(gpc, 0x3038), 0xc0000000);
382 nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
383 nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
384 nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
385 nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
386 for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
387 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
388 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
389 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
390 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
391 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
392 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
393 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
394 }
395 nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
396 nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
397 }
398}
399
400static void
401nve0_graph_init_rop(struct drm_device *dev)
402{
403 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
404 int rop;
405
406 for (rop = 0; rop < priv->rop_nr; rop++) {
407 nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
408 nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
409 nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
410 nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
411 }
412}
413
414static void
415nve0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
416 struct nve0_graph_fuc *code, struct nve0_graph_fuc *data)
417{
418 int i;
419
420 nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
421 for (i = 0; i < data->size / 4; i++)
422 nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
423
424 nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
425 for (i = 0; i < code->size / 4; i++) {
426 if ((i & 0x3f) == 0)
427 nv_wr32(dev, fuc_base + 0x0188, i >> 6);
428 nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
429 }
430}
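/*
 * Reader's note (added commentary): the loader above drives the fuc
 * engine's auto-increment upload ports: 0x01c0/0x01c4 stream the data
 * segment, while code goes in through 0x0184 with 0x0188 selecting a
 * new 256-byte page every 64 words (the "(i & 0x3f) == 0" check).
 */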
431
432static int
433nve0_graph_init_ctxctl(struct drm_device *dev)
434{
435 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
436 u32 r000260;
437
438 /* load fuc microcode */
439 r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
440 nve0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
441 nve0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
442 nv_wr32(dev, 0x000260, r000260);
443
444 /* start both of them running */
445 nv_wr32(dev, 0x409840, 0xffffffff);
446 nv_wr32(dev, 0x41a10c, 0x00000000);
447 nv_wr32(dev, 0x40910c, 0x00000000);
448 nv_wr32(dev, 0x41a100, 0x00000002);
449 nv_wr32(dev, 0x409100, 0x00000002);
450 if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
451 NV_INFO(dev, "0x409800 wait failed\n");
452
453 nv_wr32(dev, 0x409840, 0xffffffff);
454 nv_wr32(dev, 0x409500, 0x7fffffff);
455 nv_wr32(dev, 0x409504, 0x00000021);
456
457 nv_wr32(dev, 0x409840, 0xffffffff);
458 nv_wr32(dev, 0x409500, 0x00000000);
459 nv_wr32(dev, 0x409504, 0x00000010);
460 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
461 NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
462 return -EBUSY;
463 }
464 priv->grctx_size = nv_rd32(dev, 0x409800);
465
466 nv_wr32(dev, 0x409840, 0xffffffff);
467 nv_wr32(dev, 0x409500, 0x00000000);
468 nv_wr32(dev, 0x409504, 0x00000016);
469 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
470 NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
471 return -EBUSY;
472 }
473
474 nv_wr32(dev, 0x409840, 0xffffffff);
475 nv_wr32(dev, 0x409500, 0x00000000);
476 nv_wr32(dev, 0x409504, 0x00000025);
477 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
478 NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
479 return -EBUSY;
480 }
481
482 nv_wr32(dev, 0x409800, 0x00000000);
483 nv_wr32(dev, 0x409500, 0x00000001);
484 nv_wr32(dev, 0x409504, 0x00000030);
485 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
486 NV_ERROR(dev, "fuc09 req 0x30 timeout\n");
487 return -EBUSY;
488 }
489
490 nv_wr32(dev, 0x409810, 0xb00095c8);
491 nv_wr32(dev, 0x409800, 0x00000000);
492 nv_wr32(dev, 0x409500, 0x00000001);
493 nv_wr32(dev, 0x409504, 0x00000031);
494 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
495 NV_ERROR(dev, "fuc09 req 0x31 timeout\n");
496 return -EBUSY;
497 }
498
499 nv_wr32(dev, 0x409810, 0x00080420);
500 nv_wr32(dev, 0x409800, 0x00000000);
501 nv_wr32(dev, 0x409500, 0x00000001);
502 nv_wr32(dev, 0x409504, 0x00000032);
503 if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
504 NV_ERROR(dev, "fuc09 req 0x32 timeout\n");
505 return -EBUSY;
506 }
507
508 nv_wr32(dev, 0x409614, 0x00000070);
509 nv_wr32(dev, 0x409614, 0x00000770);
510 nv_wr32(dev, 0x40802c, 0x00000001);
511 return 0;
512}
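/*
 * Reader's note (added commentary): every exchange with the ctxctl
 * microcode above is the same handshake: argument into 0x409500,
 * request code into 0x409504, then poll 0x409800 for a reply.  Request
 * 0x10 returns the context size; the 0x16/0x25 and 0x30-0x32 requests
 * are further setup steps, each reported as a "fuc09 req ..." timeout
 * on failure.
 */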
513
514static int
515nve0_graph_init(struct drm_device *dev, int engine)
516{
517 int ret;
518
519 nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
520 nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
521
522 nve0_graph_init_obj418880(dev);
523 nve0_graph_init_regs(dev);
524 nve0_graph_init_gpc_0(dev);
525
526 nv_wr32(dev, 0x400500, 0x00010001);
527 nv_wr32(dev, 0x400100, 0xffffffff);
528 nv_wr32(dev, 0x40013c, 0xffffffff);
529
530 nve0_graph_init_units(dev);
531 nve0_graph_init_gpc_1(dev);
532 nve0_graph_init_rop(dev);
533
534 nv_wr32(dev, 0x400108, 0xffffffff);
535 nv_wr32(dev, 0x400138, 0xffffffff);
536 nv_wr32(dev, 0x400118, 0xffffffff);
537 nv_wr32(dev, 0x400130, 0xffffffff);
538 nv_wr32(dev, 0x40011c, 0xffffffff);
539 nv_wr32(dev, 0x400134, 0xffffffff);
540 nv_wr32(dev, 0x400054, 0x34ce3464);
541
542 ret = nve0_graph_init_ctxctl(dev);
543 if (ret)
544 return ret;
545
546 return 0;
547}
548
549int
550nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
551{
552 struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
553 struct drm_nouveau_private *dev_priv = dev->dev_private;
554 struct nouveau_channel *chan;
555 unsigned long flags;
556 int i;
557
558 spin_lock_irqsave(&dev_priv->channels.lock, flags);
559 for (i = 0; i < pfifo->channels; i++) {
560 chan = dev_priv->channels.ptr[i];
561 if (!chan || !chan->ramin)
562 continue;
563
564 if (inst == chan->ramin->vinst)
565 break;
566 }
567 spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
568 return i;
569}
570
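The lookup above resolves the instance address latched by hardware back to a channel id with a linear scan under the channel spinlock; when nothing matches, the loop runs off the end and the function returns pfifo->channels, an out-of-range id that the ISR simply prints. A standalone model of the scan (locking omitted, types simplified):

	#include <stdint.h>
	#include <stdio.h>

	struct chan { uint64_t vinst; };

	/* returns the matching index, or nr when no channel matches */
	static int chid_from_inst(struct chan **chans, int nr, uint64_t inst)
	{
		int i;

		for (i = 0; i < nr; i++) {
			if (chans[i] && chans[i]->vinst == inst)
				break;
		}
		return i;
	}

	int main(void)
	{
		struct chan c0 = { 0x1000 }, c1 = { 0x2000 };
		struct chan *chans[] = { &c0, &c1 };

		printf("%d\n", chid_from_inst(chans, 2, 0x2000));	/* 1 */
		printf("%d\n", chid_from_inst(chans, 2, 0x3000));	/* 2: not found */
		return 0;
	}
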
571static void
572nve0_graph_ctxctl_isr(struct drm_device *dev)
573{
574 u32 ustat = nv_rd32(dev, 0x409c18);
575
576 if (ustat & 0x00000001)
577 NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
578 if (ustat & 0x00080000)
579 NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
580 if (ustat & ~0x00080001)
581 NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
582
583 nve0_graph_ctxctl_debug(dev);
584 nv_wr32(dev, 0x409c20, ustat);
585}
586
587static void
588nve0_graph_trap_isr(struct drm_device *dev, int chid)
589{
590 struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
591 u32 trap = nv_rd32(dev, 0x400108);
592 int rop;
593
594 if (trap & 0x00000001) {
595 u32 stat = nv_rd32(dev, 0x404000);
596 NV_INFO(dev, "PGRAPH: DISPATCH ch %d 0x%08x\n", chid, stat);
597 nv_wr32(dev, 0x404000, 0xc0000000);
598 nv_wr32(dev, 0x400108, 0x00000001);
599 trap &= ~0x00000001;
600 }
601
602 if (trap & 0x00000010) {
603 u32 stat = nv_rd32(dev, 0x405840);
604 NV_INFO(dev, "PGRAPH: SHADER ch %d 0x%08x\n", chid, stat);
605 nv_wr32(dev, 0x405840, 0xc0000000);
606 nv_wr32(dev, 0x400108, 0x00000010);
607 trap &= ~0x00000010;
608 }
609
610 if (trap & 0x02000000) {
611 for (rop = 0; rop < priv->rop_nr; rop++) {
612 u32 statz = nv_rd32(dev, ROP_UNIT(rop, 0x070));
613 u32 statc = nv_rd32(dev, ROP_UNIT(rop, 0x144));
614 NV_INFO(dev, "PGRAPH: ROP%d ch %d 0x%08x 0x%08x\n",
615 rop, chid, statz, statc);
616 nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
617 nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
618 }
619 nv_wr32(dev, 0x400108, 0x02000000);
620 trap &= ~0x02000000;
621 }
622
623 if (trap) {
624 NV_INFO(dev, "PGRAPH: TRAP ch %d 0x%08x\n", chid, trap);
625 nv_wr32(dev, 0x400108, trap);
626 }
627}
628
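Every trap source above follows the same recipe: read the unit's status register, log it, write 0xc0000000 back to reset the unit, ack the source bit in 0x400108, and clear it from the local copy so anything left over is reported at the end. That recipe generalizes to a table walk; a sketch using the DISPATCH and SHADER entries from the listing (handle_traps() and the register stubs are illustrative, not driver code):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t reg_rd32(uint32_t reg) { (void)reg; return 0xc0000000; }

	static void reg_wr32(uint32_t reg, uint32_t val)
	{
		printf("wr 0x%06x <- 0x%08x\n", (unsigned)reg, (unsigned)val);
	}

	static const struct {
		uint32_t bit;		/* source bit in 0x400108 */
		uint32_t unit;		/* unit status register */
		const char *name;
	} trap_units[] = {
		{ 0x00000001, 0x404000, "DISPATCH" },
		{ 0x00000010, 0x405840, "SHADER" },
	};

	static void handle_traps(uint32_t trap, int chid)
	{
		size_t i;

		for (i = 0; i < sizeof(trap_units) / sizeof(trap_units[0]); i++) {
			if (!(trap & trap_units[i].bit))
				continue;
			printf("PGRAPH: %s ch %d 0x%08x\n", trap_units[i].name,
			       chid, (unsigned)reg_rd32(trap_units[i].unit));
			reg_wr32(trap_units[i].unit, 0xc0000000);	/* reset the unit */
			reg_wr32(0x400108, trap_units[i].bit);		/* ack the source */
			trap &= ~trap_units[i].bit;
		}
		if (trap)	/* leftovers, as in the final branch above */
			printf("PGRAPH: TRAP ch %d 0x%08x\n", chid, (unsigned)trap);
	}

	int main(void) { handle_traps(0x02000011, 3); return 0; }
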
629static void
630nve0_graph_isr(struct drm_device *dev)
631{
632 u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
633 u32 chid = nve0_graph_isr_chid(dev, inst);
634 u32 stat = nv_rd32(dev, 0x400100);
635 u32 addr = nv_rd32(dev, 0x400704);
636 u32 mthd = (addr & 0x00003ffc);
637 u32 subc = (addr & 0x00070000) >> 16;
638 u32 data = nv_rd32(dev, 0x400708);
639 u32 code = nv_rd32(dev, 0x400110);
640 u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
641
642 if (stat & 0x00000010) {
643 if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
644 NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
645 "subc %d class 0x%04x mthd 0x%04x "
646 "data 0x%08x\n",
647 chid, inst, subc, class, mthd, data);
648 }
649 nv_wr32(dev, 0x400100, 0x00000010);
650 stat &= ~0x00000010;
651 }
652
653 if (stat & 0x00000020) {
654 NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
655 "class 0x%04x mthd 0x%04x data 0x%08x\n",
656 chid, inst, subc, class, mthd, data);
657 nv_wr32(dev, 0x400100, 0x00000020);
658 stat &= ~0x00000020;
659 }
660
661 if (stat & 0x00100000) {
662 NV_INFO(dev, "PGRAPH: DATA_ERROR [");
663 nouveau_enum_print(nv50_data_error_names, code);
664 printk("] ch %d [0x%010llx] subc %d class 0x%04x "
665 "mthd 0x%04x data 0x%08x\n",
666 chid, inst, subc, class, mthd, data);
667 nv_wr32(dev, 0x400100, 0x00100000);
668 stat &= ~0x00100000;
669 }
670
671 if (stat & 0x00200000) {
672 nve0_graph_trap_isr(dev, chid);
673 nv_wr32(dev, 0x400100, 0x00200000);
674 stat &= ~0x00200000;
675 }
676
677 if (stat & 0x00080000) {
678 nve0_graph_ctxctl_isr(dev);
679 nv_wr32(dev, 0x400100, 0x00080000);
680 stat &= ~0x00080000;
681 }
682
683 if (stat) {
684 NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
685 nv_wr32(dev, 0x400100, stat);
686 }
687
688 nv_wr32(dev, 0x400500, 0x00010001);
689}
690
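The ISR decodes the trapping method from the address latched in 0x400704: bits 2..13 hold the word-aligned method offset and bits 16..18 the subchannel, after which the class bound to that subchannel is read back from 0x404200 + subc * 4. A worked decode using the same masks (the address value is an example):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t addr = 0x00051234;		   /* example latched value */
		uint32_t mthd = addr & 0x00003ffc;	   /* word-aligned method */
		uint32_t subc = (addr & 0x00070000) >> 16; /* subchannel 0..7 */

		printf("subc %u mthd 0x%04x\n", (unsigned)subc, (unsigned)mthd);
		return 0;
	}
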
691static int
692nve0_graph_create_fw(struct drm_device *dev, const char *fwname,
693 struct nve0_graph_fuc *fuc)
694{
695 struct drm_nouveau_private *dev_priv = dev->dev_private;
696 const struct firmware *fw;
697 char f[32];
698 int ret;
699
700 snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
701 ret = request_firmware(&fw, f, &dev->pdev->dev);
702 if (ret)
703 return ret;
704
705 fuc->size = fw->size;
706 fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
707 release_firmware(fw);
708 return (fuc->data != NULL) ? 0 : -ENOMEM;
709}
710
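The firmware name is assembled as "nouveau/nv%02x_%s", so a chipset 0xe4 board requests nouveau/nve4_fuc409c, nve4_fuc409d, nve4_fuc41ac and nve4_fuc41ad; the payload is copied with kmemdup() so it can outlive release_firmware(). The name construction, runnable standalone:

	#include <stdio.h>

	int main(void)
	{
		static const char *blobs[] = {
			"fuc409c", "fuc409d", "fuc41ac", "fuc41ad",
		};
		int i, chipset = 0xe4;
		char f[32];

		for (i = 0; i < 4; i++) {
			snprintf(f, sizeof(f), "nouveau/nv%02x_%s",
				 chipset, blobs[i]);
			printf("%s\n", f);	/* e.g. nouveau/nve4_fuc409c */
		}
		return 0;
	}
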
711static void
712nve0_graph_destroy_fw(struct nve0_graph_fuc *fuc)
713{
714 if (fuc->data) {
715 kfree(fuc->data);
716 fuc->data = NULL;
717 }
718}
719
720static void
721nve0_graph_destroy(struct drm_device *dev, int engine)
722{
723 struct nve0_graph_priv *priv = nv_engine(dev, engine);
724
725 nve0_graph_destroy_fw(&priv->fuc409c);
726 nve0_graph_destroy_fw(&priv->fuc409d);
727 nve0_graph_destroy_fw(&priv->fuc41ac);
728 nve0_graph_destroy_fw(&priv->fuc41ad);
729
730 nouveau_irq_unregister(dev, 12);
731
732 nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
733 nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
734
735	/* kfree() tolerates NULL, so the check is unnecessary */
736	kfree(priv->grctx_vals);
737
738 NVOBJ_ENGINE_DEL(dev, GR);
739 kfree(priv);
740}
741
742int
743nve0_graph_create(struct drm_device *dev)
744{
745 struct drm_nouveau_private *dev_priv = dev->dev_private;
746 struct nve0_graph_priv *priv;
747 int ret, gpc, i;
748 u32 kepler;
749
750 kepler = nve0_graph_class(dev);
751 if (!kepler) {
752 NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
753 return 0;
754 }
755
756 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
757 if (!priv)
758 return -ENOMEM;
759
760 priv->base.destroy = nve0_graph_destroy;
761 priv->base.init = nve0_graph_init;
762 priv->base.fini = nve0_graph_fini;
763 priv->base.context_new = nve0_graph_context_new;
764 priv->base.context_del = nve0_graph_context_del;
765 priv->base.object_new = nve0_graph_object_new;
766
767 NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
768 nouveau_irq_register(dev, 12, nve0_graph_isr);
769
770 NV_INFO(dev, "PGRAPH: using external firmware\n");
771 if (nve0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
772 nve0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
773 nve0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
774 nve0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
775		ret = 0; /* missing firmware is non-fatal: GR is torn down below, but driver load still succeeds */
776 goto error;
777 }
778
779 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
780 if (ret)
781 goto error;
782
783 ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
784 if (ret)
785 goto error;
786
787 for (i = 0; i < 0x1000; i += 4) {
788 nv_wo32(priv->unk4188b4, i, 0x00000010);
789 nv_wo32(priv->unk4188b8, i, 0x00000010);
790 }
791
792 priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
793 priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
794 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
795 priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
796 priv->tpc_total += priv->tpc_nr[gpc];
797 }
798
799 switch (dev_priv->chipset) {
800 case 0xe4:
801 if (priv->tpc_total == 8)
802 priv->magic_not_rop_nr = 3;
803 else
804 if (priv->tpc_total == 7)
805 priv->magic_not_rop_nr = 1;
806 break;
807 case 0xe7:
808 priv->magic_not_rop_nr = 1;
809 break;
810 default:
811 break;
812 }
813
814 if (!priv->magic_not_rop_nr) {
815 NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
816 priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
817 priv->tpc_nr[3], priv->rop_nr);
818		priv->magic_not_rop_nr = 0x00; /* already zero; init continues despite the unknown config */
819 }
820
821 NVOBJ_CLASS(dev, 0xa097, GR); /* subc 0: 3D */
822 NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
823 NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
824 NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
825 NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
826 return 0;
827
828error:
829 nve0_graph_destroy(dev, NVOBJ_ENGINE_GR);
830 return ret;
831}
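The two unit counts read in nve0_graph_create() above come packed into 0x409604: bits 0..4 give the GPC count and bits 16..20 the ROP count, with the per-GPC TPC counts then read one register at a time. A standalone decode with a hypothetical register value:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t v = 0x00040004;	/* hypothetical: 4 ROPs, 4 GPCs */
		uint32_t gpc_nr = v & 0x0000001f;
		uint32_t rop_nr = (v & 0x001f0000) >> 16;

		printf("gpc_nr %u rop_nr %u\n", (unsigned)gpc_nr, (unsigned)rop_nr);
		return 0;
	}
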
diff --git a/drivers/gpu/drm/nouveau/nve0_graph.h b/drivers/gpu/drm/nouveau/nve0_graph.h
deleted file mode 100644
index 2ba70449ba01..000000000000
--- a/drivers/gpu/drm/nouveau/nve0_graph.h
+++ /dev/null
@@ -1,89 +0,0 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#ifndef __NVE0_GRAPH_H__
26#define __NVE0_GRAPH_H__
27
28#define GPC_MAX 4
29#define TPC_MAX 32
30
31#define ROP_BCAST(r) (0x408800 + (r))
32#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
33#define GPC_BCAST(r) (0x418000 + (r))
34#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
35#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
36
37struct nve0_graph_fuc {
38 u32 *data;
39 u32 size;
40};
41
42struct nve0_graph_priv {
43 struct nouveau_exec_engine base;
44
45 struct nve0_graph_fuc fuc409c;
46 struct nve0_graph_fuc fuc409d;
47 struct nve0_graph_fuc fuc41ac;
48 struct nve0_graph_fuc fuc41ad;
49
50 u8 gpc_nr;
51 u8 rop_nr;
52 u8 tpc_nr[GPC_MAX];
53 u8 tpc_total;
54
55 u32 grctx_size;
56 u32 *grctx_vals;
57 struct nouveau_gpuobj *unk4188b4;
58 struct nouveau_gpuobj *unk4188b8;
59
60 u8 magic_not_rop_nr;
61};
62
63struct nve0_graph_chan {
64 struct nouveau_gpuobj *grctx;
65 struct nouveau_gpuobj *unk408004; /* 0x418810 too */
66 struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
67 struct nouveau_gpuobj *unk418810; /* 0x419848 too */
68 struct nouveau_gpuobj *mmio;
69 int mmio_nr;
70};
71
72int nve0_grctx_generate(struct nouveau_channel *);
73
74/* nve0_graph.c also uses this to determine supported chipsets */
75static inline u32
76nve0_graph_class(struct drm_device *dev)
77{
78 struct drm_nouveau_private *dev_priv = dev->dev_private;
79
80 switch (dev_priv->chipset) {
81 case 0xe4:
82 case 0xe7:
83 return 0xa097;
84 default:
85 return 0;
86 }
87}
88
89#endif
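The macros above linearize the per-unit register files: ROPs get 0x400-byte windows from 0x410000, GPCs 0x8000-byte windows from 0x500000, and each TPC an 0x800-byte window starting 0x4000 into its GPC. A worked example that can be checked against the GPC_UNIT(gpc, 0x2608) read in nve0_graph_create():

	#include <stdio.h>

	#define ROP_UNIT(u, r)	  (0x410000 + (u) * 0x400 + (r))
	#define GPC_UNIT(t, r)	  (0x500000 + (t) * 0x8000 + (r))
	#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))

	int main(void)
	{
		printf("GPC_UNIT(1, 0x2608) = 0x%06x\n", GPC_UNIT(1, 0x2608)); /* 0x50a608 */
		printf("TPC_UNIT(0, 2, 0x48) = 0x%06x\n", TPC_UNIT(0, 2, 0x48)); /* 0x505048 */
		printf("ROP_UNIT(1, 0x070)  = 0x%06x\n", ROP_UNIT(1, 0x070));  /* 0x410470 */
		return 0;
	}
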
diff --git a/drivers/gpu/drm/nouveau/nve0_grctx.c b/drivers/gpu/drm/nouveau/nve0_grctx.c
deleted file mode 100644
index d3a802987972..000000000000
--- a/drivers/gpu/drm/nouveau/nve0_grctx.c
+++ /dev/null
@@ -1,2777 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drv.h"
27#include "nouveau_mm.h"
28#include "nve0_graph.h"
29
30static void
31nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
32{
33 nv_wr32(dev, 0x400204, data);
34 nv_wr32(dev, 0x400200, icmd);
35	while (nv_rd32(dev, 0x400700) & 0x00000002) {} /* spin on the FE busy bit; no timeout */
36}
37
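nv_icmd() pushes one init command at a time: payload to 0x400204, command index to 0x400200, then a spin on the FE busy bit, with 0x400208 bit 31 bracketing the whole stream (set at line 41, cleared at line 923). The driver's spin is unbounded; a sketch of the same sequence with a bounded wait (register side-effects faked for illustration):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t fe_status;	/* stands in for 0x400700; bit 1 = busy */

	static void reg_wr32(uint32_t reg, uint32_t val)
	{
		(void)val;
		if (reg == 0x400200)		/* the command write kicks the FE */
			fe_status = 0x00000000;	/* pretend it finishes at once */
	}

	static uint32_t reg_rd32(uint32_t reg)
	{
		return reg == 0x400700 ? fe_status : 0;
	}

	static int icmd(uint32_t idx, uint32_t data)
	{
		int tries = 100000;

		reg_wr32(0x400204, data);	/* payload first */
		reg_wr32(0x400200, idx);	/* then the command index */
		while (reg_rd32(0x400700) & 0x00000002) {
			if (--tries == 0)
				return -1;	/* bounded, unlike the driver */
		}
		return 0;
	}

	int main(void)
	{
		printf("icmd -> %d\n", icmd(0x001000, 0x00000004));
		return 0;
	}
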
38static void
39nve0_grctx_generate_icmd(struct drm_device *dev)
40{
41 nv_wr32(dev, 0x400208, 0x80000000);
42 nv_icmd(dev, 0x001000, 0x00000004);
43 nv_icmd(dev, 0x000039, 0x00000000);
44 nv_icmd(dev, 0x00003a, 0x00000000);
45 nv_icmd(dev, 0x00003b, 0x00000000);
46 nv_icmd(dev, 0x0000a9, 0x0000ffff);
47 nv_icmd(dev, 0x000038, 0x0fac6881);
48 nv_icmd(dev, 0x00003d, 0x00000001);
49 nv_icmd(dev, 0x0000e8, 0x00000400);
50 nv_icmd(dev, 0x0000e9, 0x00000400);
51 nv_icmd(dev, 0x0000ea, 0x00000400);
52 nv_icmd(dev, 0x0000eb, 0x00000400);
53 nv_icmd(dev, 0x0000ec, 0x00000400);
54 nv_icmd(dev, 0x0000ed, 0x00000400);
55 nv_icmd(dev, 0x0000ee, 0x00000400);
56 nv_icmd(dev, 0x0000ef, 0x00000400);
57 nv_icmd(dev, 0x000078, 0x00000300);
58 nv_icmd(dev, 0x000079, 0x00000300);
59 nv_icmd(dev, 0x00007a, 0x00000300);
60 nv_icmd(dev, 0x00007b, 0x00000300);
61 nv_icmd(dev, 0x00007c, 0x00000300);
62 nv_icmd(dev, 0x00007d, 0x00000300);
63 nv_icmd(dev, 0x00007e, 0x00000300);
64 nv_icmd(dev, 0x00007f, 0x00000300);
65 nv_icmd(dev, 0x000050, 0x00000011);
66 nv_icmd(dev, 0x000058, 0x00000008);
67 nv_icmd(dev, 0x000059, 0x00000008);
68 nv_icmd(dev, 0x00005a, 0x00000008);
69 nv_icmd(dev, 0x00005b, 0x00000008);
70 nv_icmd(dev, 0x00005c, 0x00000008);
71 nv_icmd(dev, 0x00005d, 0x00000008);
72 nv_icmd(dev, 0x00005e, 0x00000008);
73 nv_icmd(dev, 0x00005f, 0x00000008);
74 nv_icmd(dev, 0x000208, 0x00000001);
75 nv_icmd(dev, 0x000209, 0x00000001);
76 nv_icmd(dev, 0x00020a, 0x00000001);
77 nv_icmd(dev, 0x00020b, 0x00000001);
78 nv_icmd(dev, 0x00020c, 0x00000001);
79 nv_icmd(dev, 0x00020d, 0x00000001);
80 nv_icmd(dev, 0x00020e, 0x00000001);
81 nv_icmd(dev, 0x00020f, 0x00000001);
82 nv_icmd(dev, 0x000081, 0x00000001);
83 nv_icmd(dev, 0x000085, 0x00000004);
84 nv_icmd(dev, 0x000088, 0x00000400);
85 nv_icmd(dev, 0x000090, 0x00000300);
86 nv_icmd(dev, 0x000098, 0x00001001);
87 nv_icmd(dev, 0x0000e3, 0x00000001);
88 nv_icmd(dev, 0x0000da, 0x00000001);
89 nv_icmd(dev, 0x0000f8, 0x00000003);
90 nv_icmd(dev, 0x0000fa, 0x00000001);
91 nv_icmd(dev, 0x00009f, 0x0000ffff);
92 nv_icmd(dev, 0x0000a0, 0x0000ffff);
93 nv_icmd(dev, 0x0000a1, 0x0000ffff);
94 nv_icmd(dev, 0x0000a2, 0x0000ffff);
95 nv_icmd(dev, 0x0000b1, 0x00000001);
96 nv_icmd(dev, 0x0000ad, 0x0000013e);
97 nv_icmd(dev, 0x0000e1, 0x00000010);
98 nv_icmd(dev, 0x000290, 0x00000000);
99 nv_icmd(dev, 0x000291, 0x00000000);
100 nv_icmd(dev, 0x000292, 0x00000000);
101 nv_icmd(dev, 0x000293, 0x00000000);
102 nv_icmd(dev, 0x000294, 0x00000000);
103 nv_icmd(dev, 0x000295, 0x00000000);
104 nv_icmd(dev, 0x000296, 0x00000000);
105 nv_icmd(dev, 0x000297, 0x00000000);
106 nv_icmd(dev, 0x000298, 0x00000000);
107 nv_icmd(dev, 0x000299, 0x00000000);
108 nv_icmd(dev, 0x00029a, 0x00000000);
109 nv_icmd(dev, 0x00029b, 0x00000000);
110 nv_icmd(dev, 0x00029c, 0x00000000);
111 nv_icmd(dev, 0x00029d, 0x00000000);
112 nv_icmd(dev, 0x00029e, 0x00000000);
113 nv_icmd(dev, 0x00029f, 0x00000000);
114 nv_icmd(dev, 0x0003b0, 0x00000000);
115 nv_icmd(dev, 0x0003b1, 0x00000000);
116 nv_icmd(dev, 0x0003b2, 0x00000000);
117 nv_icmd(dev, 0x0003b3, 0x00000000);
118 nv_icmd(dev, 0x0003b4, 0x00000000);
119 nv_icmd(dev, 0x0003b5, 0x00000000);
120 nv_icmd(dev, 0x0003b6, 0x00000000);
121 nv_icmd(dev, 0x0003b7, 0x00000000);
122 nv_icmd(dev, 0x0003b8, 0x00000000);
123 nv_icmd(dev, 0x0003b9, 0x00000000);
124 nv_icmd(dev, 0x0003ba, 0x00000000);
125 nv_icmd(dev, 0x0003bb, 0x00000000);
126 nv_icmd(dev, 0x0003bc, 0x00000000);
127 nv_icmd(dev, 0x0003bd, 0x00000000);
128 nv_icmd(dev, 0x0003be, 0x00000000);
129 nv_icmd(dev, 0x0003bf, 0x00000000);
130 nv_icmd(dev, 0x0002a0, 0x00000000);
131 nv_icmd(dev, 0x0002a1, 0x00000000);
132 nv_icmd(dev, 0x0002a2, 0x00000000);
133 nv_icmd(dev, 0x0002a3, 0x00000000);
134 nv_icmd(dev, 0x0002a4, 0x00000000);
135 nv_icmd(dev, 0x0002a5, 0x00000000);
136 nv_icmd(dev, 0x0002a6, 0x00000000);
137 nv_icmd(dev, 0x0002a7, 0x00000000);
138 nv_icmd(dev, 0x0002a8, 0x00000000);
139 nv_icmd(dev, 0x0002a9, 0x00000000);
140 nv_icmd(dev, 0x0002aa, 0x00000000);
141 nv_icmd(dev, 0x0002ab, 0x00000000);
142 nv_icmd(dev, 0x0002ac, 0x00000000);
143 nv_icmd(dev, 0x0002ad, 0x00000000);
144 nv_icmd(dev, 0x0002ae, 0x00000000);
145 nv_icmd(dev, 0x0002af, 0x00000000);
146 nv_icmd(dev, 0x000420, 0x00000000);
147 nv_icmd(dev, 0x000421, 0x00000000);
148 nv_icmd(dev, 0x000422, 0x00000000);
149 nv_icmd(dev, 0x000423, 0x00000000);
150 nv_icmd(dev, 0x000424, 0x00000000);
151 nv_icmd(dev, 0x000425, 0x00000000);
152 nv_icmd(dev, 0x000426, 0x00000000);
153 nv_icmd(dev, 0x000427, 0x00000000);
154 nv_icmd(dev, 0x000428, 0x00000000);
155 nv_icmd(dev, 0x000429, 0x00000000);
156 nv_icmd(dev, 0x00042a, 0x00000000);
157 nv_icmd(dev, 0x00042b, 0x00000000);
158 nv_icmd(dev, 0x00042c, 0x00000000);
159 nv_icmd(dev, 0x00042d, 0x00000000);
160 nv_icmd(dev, 0x00042e, 0x00000000);
161 nv_icmd(dev, 0x00042f, 0x00000000);
162 nv_icmd(dev, 0x0002b0, 0x00000000);
163 nv_icmd(dev, 0x0002b1, 0x00000000);
164 nv_icmd(dev, 0x0002b2, 0x00000000);
165 nv_icmd(dev, 0x0002b3, 0x00000000);
166 nv_icmd(dev, 0x0002b4, 0x00000000);
167 nv_icmd(dev, 0x0002b5, 0x00000000);
168 nv_icmd(dev, 0x0002b6, 0x00000000);
169 nv_icmd(dev, 0x0002b7, 0x00000000);
170 nv_icmd(dev, 0x0002b8, 0x00000000);
171 nv_icmd(dev, 0x0002b9, 0x00000000);
172 nv_icmd(dev, 0x0002ba, 0x00000000);
173 nv_icmd(dev, 0x0002bb, 0x00000000);
174 nv_icmd(dev, 0x0002bc, 0x00000000);
175 nv_icmd(dev, 0x0002bd, 0x00000000);
176 nv_icmd(dev, 0x0002be, 0x00000000);
177 nv_icmd(dev, 0x0002bf, 0x00000000);
178 nv_icmd(dev, 0x000430, 0x00000000);
179 nv_icmd(dev, 0x000431, 0x00000000);
180 nv_icmd(dev, 0x000432, 0x00000000);
181 nv_icmd(dev, 0x000433, 0x00000000);
182 nv_icmd(dev, 0x000434, 0x00000000);
183 nv_icmd(dev, 0x000435, 0x00000000);
184 nv_icmd(dev, 0x000436, 0x00000000);
185 nv_icmd(dev, 0x000437, 0x00000000);
186 nv_icmd(dev, 0x000438, 0x00000000);
187 nv_icmd(dev, 0x000439, 0x00000000);
188 nv_icmd(dev, 0x00043a, 0x00000000);
189 nv_icmd(dev, 0x00043b, 0x00000000);
190 nv_icmd(dev, 0x00043c, 0x00000000);
191 nv_icmd(dev, 0x00043d, 0x00000000);
192 nv_icmd(dev, 0x00043e, 0x00000000);
193 nv_icmd(dev, 0x00043f, 0x00000000);
194 nv_icmd(dev, 0x0002c0, 0x00000000);
195 nv_icmd(dev, 0x0002c1, 0x00000000);
196 nv_icmd(dev, 0x0002c2, 0x00000000);
197 nv_icmd(dev, 0x0002c3, 0x00000000);
198 nv_icmd(dev, 0x0002c4, 0x00000000);
199 nv_icmd(dev, 0x0002c5, 0x00000000);
200 nv_icmd(dev, 0x0002c6, 0x00000000);
201 nv_icmd(dev, 0x0002c7, 0x00000000);
202 nv_icmd(dev, 0x0002c8, 0x00000000);
203 nv_icmd(dev, 0x0002c9, 0x00000000);
204 nv_icmd(dev, 0x0002ca, 0x00000000);
205 nv_icmd(dev, 0x0002cb, 0x00000000);
206 nv_icmd(dev, 0x0002cc, 0x00000000);
207 nv_icmd(dev, 0x0002cd, 0x00000000);
208 nv_icmd(dev, 0x0002ce, 0x00000000);
209 nv_icmd(dev, 0x0002cf, 0x00000000);
210 nv_icmd(dev, 0x0004d0, 0x00000000);
211 nv_icmd(dev, 0x0004d1, 0x00000000);
212 nv_icmd(dev, 0x0004d2, 0x00000000);
213 nv_icmd(dev, 0x0004d3, 0x00000000);
214 nv_icmd(dev, 0x0004d4, 0x00000000);
215 nv_icmd(dev, 0x0004d5, 0x00000000);
216 nv_icmd(dev, 0x0004d6, 0x00000000);
217 nv_icmd(dev, 0x0004d7, 0x00000000);
218 nv_icmd(dev, 0x0004d8, 0x00000000);
219 nv_icmd(dev, 0x0004d9, 0x00000000);
220 nv_icmd(dev, 0x0004da, 0x00000000);
221 nv_icmd(dev, 0x0004db, 0x00000000);
222 nv_icmd(dev, 0x0004dc, 0x00000000);
223 nv_icmd(dev, 0x0004dd, 0x00000000);
224 nv_icmd(dev, 0x0004de, 0x00000000);
225 nv_icmd(dev, 0x0004df, 0x00000000);
226 nv_icmd(dev, 0x000720, 0x00000000);
227 nv_icmd(dev, 0x000721, 0x00000000);
228 nv_icmd(dev, 0x000722, 0x00000000);
229 nv_icmd(dev, 0x000723, 0x00000000);
230 nv_icmd(dev, 0x000724, 0x00000000);
231 nv_icmd(dev, 0x000725, 0x00000000);
232 nv_icmd(dev, 0x000726, 0x00000000);
233 nv_icmd(dev, 0x000727, 0x00000000);
234 nv_icmd(dev, 0x000728, 0x00000000);
235 nv_icmd(dev, 0x000729, 0x00000000);
236 nv_icmd(dev, 0x00072a, 0x00000000);
237 nv_icmd(dev, 0x00072b, 0x00000000);
238 nv_icmd(dev, 0x00072c, 0x00000000);
239 nv_icmd(dev, 0x00072d, 0x00000000);
240 nv_icmd(dev, 0x00072e, 0x00000000);
241 nv_icmd(dev, 0x00072f, 0x00000000);
242 nv_icmd(dev, 0x0008c0, 0x00000000);
243 nv_icmd(dev, 0x0008c1, 0x00000000);
244 nv_icmd(dev, 0x0008c2, 0x00000000);
245 nv_icmd(dev, 0x0008c3, 0x00000000);
246 nv_icmd(dev, 0x0008c4, 0x00000000);
247 nv_icmd(dev, 0x0008c5, 0x00000000);
248 nv_icmd(dev, 0x0008c6, 0x00000000);
249 nv_icmd(dev, 0x0008c7, 0x00000000);
250 nv_icmd(dev, 0x0008c8, 0x00000000);
251 nv_icmd(dev, 0x0008c9, 0x00000000);
252 nv_icmd(dev, 0x0008ca, 0x00000000);
253 nv_icmd(dev, 0x0008cb, 0x00000000);
254 nv_icmd(dev, 0x0008cc, 0x00000000);
255 nv_icmd(dev, 0x0008cd, 0x00000000);
256 nv_icmd(dev, 0x0008ce, 0x00000000);
257 nv_icmd(dev, 0x0008cf, 0x00000000);
258 nv_icmd(dev, 0x000890, 0x00000000);
259 nv_icmd(dev, 0x000891, 0x00000000);
260 nv_icmd(dev, 0x000892, 0x00000000);
261 nv_icmd(dev, 0x000893, 0x00000000);
262 nv_icmd(dev, 0x000894, 0x00000000);
263 nv_icmd(dev, 0x000895, 0x00000000);
264 nv_icmd(dev, 0x000896, 0x00000000);
265 nv_icmd(dev, 0x000897, 0x00000000);
266 nv_icmd(dev, 0x000898, 0x00000000);
267 nv_icmd(dev, 0x000899, 0x00000000);
268 nv_icmd(dev, 0x00089a, 0x00000000);
269 nv_icmd(dev, 0x00089b, 0x00000000);
270 nv_icmd(dev, 0x00089c, 0x00000000);
271 nv_icmd(dev, 0x00089d, 0x00000000);
272 nv_icmd(dev, 0x00089e, 0x00000000);
273 nv_icmd(dev, 0x00089f, 0x00000000);
274 nv_icmd(dev, 0x0008e0, 0x00000000);
275 nv_icmd(dev, 0x0008e1, 0x00000000);
276 nv_icmd(dev, 0x0008e2, 0x00000000);
277 nv_icmd(dev, 0x0008e3, 0x00000000);
278 nv_icmd(dev, 0x0008e4, 0x00000000);
279 nv_icmd(dev, 0x0008e5, 0x00000000);
280 nv_icmd(dev, 0x0008e6, 0x00000000);
281 nv_icmd(dev, 0x0008e7, 0x00000000);
282 nv_icmd(dev, 0x0008e8, 0x00000000);
283 nv_icmd(dev, 0x0008e9, 0x00000000);
284 nv_icmd(dev, 0x0008ea, 0x00000000);
285 nv_icmd(dev, 0x0008eb, 0x00000000);
286 nv_icmd(dev, 0x0008ec, 0x00000000);
287 nv_icmd(dev, 0x0008ed, 0x00000000);
288 nv_icmd(dev, 0x0008ee, 0x00000000);
289 nv_icmd(dev, 0x0008ef, 0x00000000);
290 nv_icmd(dev, 0x0008a0, 0x00000000);
291 nv_icmd(dev, 0x0008a1, 0x00000000);
292 nv_icmd(dev, 0x0008a2, 0x00000000);
293 nv_icmd(dev, 0x0008a3, 0x00000000);
294 nv_icmd(dev, 0x0008a4, 0x00000000);
295 nv_icmd(dev, 0x0008a5, 0x00000000);
296 nv_icmd(dev, 0x0008a6, 0x00000000);
297 nv_icmd(dev, 0x0008a7, 0x00000000);
298 nv_icmd(dev, 0x0008a8, 0x00000000);
299 nv_icmd(dev, 0x0008a9, 0x00000000);
300 nv_icmd(dev, 0x0008aa, 0x00000000);
301 nv_icmd(dev, 0x0008ab, 0x00000000);
302 nv_icmd(dev, 0x0008ac, 0x00000000);
303 nv_icmd(dev, 0x0008ad, 0x00000000);
304 nv_icmd(dev, 0x0008ae, 0x00000000);
305 nv_icmd(dev, 0x0008af, 0x00000000);
306 nv_icmd(dev, 0x0008f0, 0x00000000);
307 nv_icmd(dev, 0x0008f1, 0x00000000);
308 nv_icmd(dev, 0x0008f2, 0x00000000);
309 nv_icmd(dev, 0x0008f3, 0x00000000);
310 nv_icmd(dev, 0x0008f4, 0x00000000);
311 nv_icmd(dev, 0x0008f5, 0x00000000);
312 nv_icmd(dev, 0x0008f6, 0x00000000);
313 nv_icmd(dev, 0x0008f7, 0x00000000);
314 nv_icmd(dev, 0x0008f8, 0x00000000);
315 nv_icmd(dev, 0x0008f9, 0x00000000);
316 nv_icmd(dev, 0x0008fa, 0x00000000);
317 nv_icmd(dev, 0x0008fb, 0x00000000);
318 nv_icmd(dev, 0x0008fc, 0x00000000);
319 nv_icmd(dev, 0x0008fd, 0x00000000);
320 nv_icmd(dev, 0x0008fe, 0x00000000);
321 nv_icmd(dev, 0x0008ff, 0x00000000);
322 nv_icmd(dev, 0x00094c, 0x000000ff);
323 nv_icmd(dev, 0x00094d, 0xffffffff);
324 nv_icmd(dev, 0x00094e, 0x00000002);
325 nv_icmd(dev, 0x0002ec, 0x00000001);
326 nv_icmd(dev, 0x000303, 0x00000001);
327 nv_icmd(dev, 0x0002e6, 0x00000001);
328 nv_icmd(dev, 0x000466, 0x00000052);
329 nv_icmd(dev, 0x000301, 0x3f800000);
330 nv_icmd(dev, 0x000304, 0x30201000);
331 nv_icmd(dev, 0x000305, 0x70605040);
332 nv_icmd(dev, 0x000306, 0xb8a89888);
333 nv_icmd(dev, 0x000307, 0xf8e8d8c8);
334 nv_icmd(dev, 0x00030a, 0x00ffff00);
335 nv_icmd(dev, 0x00030b, 0x0000001a);
336 nv_icmd(dev, 0x00030c, 0x00000001);
337 nv_icmd(dev, 0x000318, 0x00000001);
338 nv_icmd(dev, 0x000340, 0x00000000);
339 nv_icmd(dev, 0x000375, 0x00000001);
340 nv_icmd(dev, 0x00037d, 0x00000006);
341 nv_icmd(dev, 0x0003a0, 0x00000002);
342 nv_icmd(dev, 0x0003aa, 0x00000001);
343 nv_icmd(dev, 0x0003a9, 0x00000001);
344 nv_icmd(dev, 0x000380, 0x00000001);
345 nv_icmd(dev, 0x000383, 0x00000011);
346 nv_icmd(dev, 0x000360, 0x00000040);
347 nv_icmd(dev, 0x000366, 0x00000000);
348 nv_icmd(dev, 0x000367, 0x00000000);
349 nv_icmd(dev, 0x000368, 0x00000fff);
350 nv_icmd(dev, 0x000370, 0x00000000);
351 nv_icmd(dev, 0x000371, 0x00000000);
352 nv_icmd(dev, 0x000372, 0x000fffff);
353 nv_icmd(dev, 0x00037a, 0x00000012);
354 nv_icmd(dev, 0x000619, 0x00000003);
355 nv_icmd(dev, 0x000811, 0x00000003);
356 nv_icmd(dev, 0x000812, 0x00000004);
357 nv_icmd(dev, 0x000813, 0x00000006);
358 nv_icmd(dev, 0x000814, 0x00000008);
359 nv_icmd(dev, 0x000815, 0x0000000b);
360 nv_icmd(dev, 0x000800, 0x00000001);
361 nv_icmd(dev, 0x000801, 0x00000001);
362 nv_icmd(dev, 0x000802, 0x00000001);
363 nv_icmd(dev, 0x000803, 0x00000001);
364 nv_icmd(dev, 0x000804, 0x00000001);
365 nv_icmd(dev, 0x000805, 0x00000001);
366 nv_icmd(dev, 0x000632, 0x00000001);
367 nv_icmd(dev, 0x000633, 0x00000002);
368 nv_icmd(dev, 0x000634, 0x00000003);
369 nv_icmd(dev, 0x000635, 0x00000004);
370 nv_icmd(dev, 0x000654, 0x3f800000);
371 nv_icmd(dev, 0x000657, 0x3f800000);
372 nv_icmd(dev, 0x000655, 0x3f800000);
373 nv_icmd(dev, 0x000656, 0x3f800000);
374 nv_icmd(dev, 0x0006cd, 0x3f800000);
375 nv_icmd(dev, 0x0007f5, 0x3f800000);
376 nv_icmd(dev, 0x0007dc, 0x39291909);
377 nv_icmd(dev, 0x0007dd, 0x79695949);
378 nv_icmd(dev, 0x0007de, 0xb9a99989);
379 nv_icmd(dev, 0x0007df, 0xf9e9d9c9);
380 nv_icmd(dev, 0x0007e8, 0x00003210);
381 nv_icmd(dev, 0x0007e9, 0x00007654);
382 nv_icmd(dev, 0x0007ea, 0x00000098);
383 nv_icmd(dev, 0x0007ec, 0x39291909);
384 nv_icmd(dev, 0x0007ed, 0x79695949);
385 nv_icmd(dev, 0x0007ee, 0xb9a99989);
386 nv_icmd(dev, 0x0007ef, 0xf9e9d9c9);
387 nv_icmd(dev, 0x0007f0, 0x00003210);
388 nv_icmd(dev, 0x0007f1, 0x00007654);
389 nv_icmd(dev, 0x0007f2, 0x00000098);
390 nv_icmd(dev, 0x0005a5, 0x00000001);
391 nv_icmd(dev, 0x000980, 0x00000000);
392 nv_icmd(dev, 0x000981, 0x00000000);
393 nv_icmd(dev, 0x000982, 0x00000000);
394 nv_icmd(dev, 0x000983, 0x00000000);
395 nv_icmd(dev, 0x000984, 0x00000000);
396 nv_icmd(dev, 0x000985, 0x00000000);
397 nv_icmd(dev, 0x000986, 0x00000000);
398 nv_icmd(dev, 0x000987, 0x00000000);
399 nv_icmd(dev, 0x000988, 0x00000000);
400 nv_icmd(dev, 0x000989, 0x00000000);
401 nv_icmd(dev, 0x00098a, 0x00000000);
402 nv_icmd(dev, 0x00098b, 0x00000000);
403 nv_icmd(dev, 0x00098c, 0x00000000);
404 nv_icmd(dev, 0x00098d, 0x00000000);
405 nv_icmd(dev, 0x00098e, 0x00000000);
406 nv_icmd(dev, 0x00098f, 0x00000000);
407 nv_icmd(dev, 0x000990, 0x00000000);
408 nv_icmd(dev, 0x000991, 0x00000000);
409 nv_icmd(dev, 0x000992, 0x00000000);
410 nv_icmd(dev, 0x000993, 0x00000000);
411 nv_icmd(dev, 0x000994, 0x00000000);
412 nv_icmd(dev, 0x000995, 0x00000000);
413 nv_icmd(dev, 0x000996, 0x00000000);
414 nv_icmd(dev, 0x000997, 0x00000000);
415 nv_icmd(dev, 0x000998, 0x00000000);
416 nv_icmd(dev, 0x000999, 0x00000000);
417 nv_icmd(dev, 0x00099a, 0x00000000);
418 nv_icmd(dev, 0x00099b, 0x00000000);
419 nv_icmd(dev, 0x00099c, 0x00000000);
420 nv_icmd(dev, 0x00099d, 0x00000000);
421 nv_icmd(dev, 0x00099e, 0x00000000);
422 nv_icmd(dev, 0x00099f, 0x00000000);
423 nv_icmd(dev, 0x0009a0, 0x00000000);
424 nv_icmd(dev, 0x0009a1, 0x00000000);
425 nv_icmd(dev, 0x0009a2, 0x00000000);
426 nv_icmd(dev, 0x0009a3, 0x00000000);
427 nv_icmd(dev, 0x0009a4, 0x00000000);
428 nv_icmd(dev, 0x0009a5, 0x00000000);
429 nv_icmd(dev, 0x0009a6, 0x00000000);
430 nv_icmd(dev, 0x0009a7, 0x00000000);
431 nv_icmd(dev, 0x0009a8, 0x00000000);
432 nv_icmd(dev, 0x0009a9, 0x00000000);
433 nv_icmd(dev, 0x0009aa, 0x00000000);
434 nv_icmd(dev, 0x0009ab, 0x00000000);
435 nv_icmd(dev, 0x0009ac, 0x00000000);
436 nv_icmd(dev, 0x0009ad, 0x00000000);
437 nv_icmd(dev, 0x0009ae, 0x00000000);
438 nv_icmd(dev, 0x0009af, 0x00000000);
439 nv_icmd(dev, 0x0009b0, 0x00000000);
440 nv_icmd(dev, 0x0009b1, 0x00000000);
441 nv_icmd(dev, 0x0009b2, 0x00000000);
442 nv_icmd(dev, 0x0009b3, 0x00000000);
443 nv_icmd(dev, 0x0009b4, 0x00000000);
444 nv_icmd(dev, 0x0009b5, 0x00000000);
445 nv_icmd(dev, 0x0009b6, 0x00000000);
446 nv_icmd(dev, 0x0009b7, 0x00000000);
447 nv_icmd(dev, 0x0009b8, 0x00000000);
448 nv_icmd(dev, 0x0009b9, 0x00000000);
449 nv_icmd(dev, 0x0009ba, 0x00000000);
450 nv_icmd(dev, 0x0009bb, 0x00000000);
451 nv_icmd(dev, 0x0009bc, 0x00000000);
452 nv_icmd(dev, 0x0009bd, 0x00000000);
453 nv_icmd(dev, 0x0009be, 0x00000000);
454 nv_icmd(dev, 0x0009bf, 0x00000000);
455 nv_icmd(dev, 0x0009c0, 0x00000000);
456 nv_icmd(dev, 0x0009c1, 0x00000000);
457 nv_icmd(dev, 0x0009c2, 0x00000000);
458 nv_icmd(dev, 0x0009c3, 0x00000000);
459 nv_icmd(dev, 0x0009c4, 0x00000000);
460 nv_icmd(dev, 0x0009c5, 0x00000000);
461 nv_icmd(dev, 0x0009c6, 0x00000000);
462 nv_icmd(dev, 0x0009c7, 0x00000000);
463 nv_icmd(dev, 0x0009c8, 0x00000000);
464 nv_icmd(dev, 0x0009c9, 0x00000000);
465 nv_icmd(dev, 0x0009ca, 0x00000000);
466 nv_icmd(dev, 0x0009cb, 0x00000000);
467 nv_icmd(dev, 0x0009cc, 0x00000000);
468 nv_icmd(dev, 0x0009cd, 0x00000000);
469 nv_icmd(dev, 0x0009ce, 0x00000000);
470 nv_icmd(dev, 0x0009cf, 0x00000000);
471 nv_icmd(dev, 0x0009d0, 0x00000000);
472 nv_icmd(dev, 0x0009d1, 0x00000000);
473 nv_icmd(dev, 0x0009d2, 0x00000000);
474 nv_icmd(dev, 0x0009d3, 0x00000000);
475 nv_icmd(dev, 0x0009d4, 0x00000000);
476 nv_icmd(dev, 0x0009d5, 0x00000000);
477 nv_icmd(dev, 0x0009d6, 0x00000000);
478 nv_icmd(dev, 0x0009d7, 0x00000000);
479 nv_icmd(dev, 0x0009d8, 0x00000000);
480 nv_icmd(dev, 0x0009d9, 0x00000000);
481 nv_icmd(dev, 0x0009da, 0x00000000);
482 nv_icmd(dev, 0x0009db, 0x00000000);
483 nv_icmd(dev, 0x0009dc, 0x00000000);
484 nv_icmd(dev, 0x0009dd, 0x00000000);
485 nv_icmd(dev, 0x0009de, 0x00000000);
486 nv_icmd(dev, 0x0009df, 0x00000000);
487 nv_icmd(dev, 0x0009e0, 0x00000000);
488 nv_icmd(dev, 0x0009e1, 0x00000000);
489 nv_icmd(dev, 0x0009e2, 0x00000000);
490 nv_icmd(dev, 0x0009e3, 0x00000000);
491 nv_icmd(dev, 0x0009e4, 0x00000000);
492 nv_icmd(dev, 0x0009e5, 0x00000000);
493 nv_icmd(dev, 0x0009e6, 0x00000000);
494 nv_icmd(dev, 0x0009e7, 0x00000000);
495 nv_icmd(dev, 0x0009e8, 0x00000000);
496 nv_icmd(dev, 0x0009e9, 0x00000000);
497 nv_icmd(dev, 0x0009ea, 0x00000000);
498 nv_icmd(dev, 0x0009eb, 0x00000000);
499 nv_icmd(dev, 0x0009ec, 0x00000000);
500 nv_icmd(dev, 0x0009ed, 0x00000000);
501 nv_icmd(dev, 0x0009ee, 0x00000000);
502 nv_icmd(dev, 0x0009ef, 0x00000000);
503 nv_icmd(dev, 0x0009f0, 0x00000000);
504 nv_icmd(dev, 0x0009f1, 0x00000000);
505 nv_icmd(dev, 0x0009f2, 0x00000000);
506 nv_icmd(dev, 0x0009f3, 0x00000000);
507 nv_icmd(dev, 0x0009f4, 0x00000000);
508 nv_icmd(dev, 0x0009f5, 0x00000000);
509 nv_icmd(dev, 0x0009f6, 0x00000000);
510 nv_icmd(dev, 0x0009f7, 0x00000000);
511 nv_icmd(dev, 0x0009f8, 0x00000000);
512 nv_icmd(dev, 0x0009f9, 0x00000000);
513 nv_icmd(dev, 0x0009fa, 0x00000000);
514 nv_icmd(dev, 0x0009fb, 0x00000000);
515 nv_icmd(dev, 0x0009fc, 0x00000000);
516 nv_icmd(dev, 0x0009fd, 0x00000000);
517 nv_icmd(dev, 0x0009fe, 0x00000000);
518 nv_icmd(dev, 0x0009ff, 0x00000000);
519 nv_icmd(dev, 0x000468, 0x00000004);
520 nv_icmd(dev, 0x00046c, 0x00000001);
521 nv_icmd(dev, 0x000470, 0x00000000);
522 nv_icmd(dev, 0x000471, 0x00000000);
523 nv_icmd(dev, 0x000472, 0x00000000);
524 nv_icmd(dev, 0x000473, 0x00000000);
525 nv_icmd(dev, 0x000474, 0x00000000);
526 nv_icmd(dev, 0x000475, 0x00000000);
527 nv_icmd(dev, 0x000476, 0x00000000);
528 nv_icmd(dev, 0x000477, 0x00000000);
529 nv_icmd(dev, 0x000478, 0x00000000);
530 nv_icmd(dev, 0x000479, 0x00000000);
531 nv_icmd(dev, 0x00047a, 0x00000000);
532 nv_icmd(dev, 0x00047b, 0x00000000);
533 nv_icmd(dev, 0x00047c, 0x00000000);
534 nv_icmd(dev, 0x00047d, 0x00000000);
535 nv_icmd(dev, 0x00047e, 0x00000000);
536 nv_icmd(dev, 0x00047f, 0x00000000);
537 nv_icmd(dev, 0x000480, 0x00000000);
538 nv_icmd(dev, 0x000481, 0x00000000);
539 nv_icmd(dev, 0x000482, 0x00000000);
540 nv_icmd(dev, 0x000483, 0x00000000);
541 nv_icmd(dev, 0x000484, 0x00000000);
542 nv_icmd(dev, 0x000485, 0x00000000);
543 nv_icmd(dev, 0x000486, 0x00000000);
544 nv_icmd(dev, 0x000487, 0x00000000);
545 nv_icmd(dev, 0x000488, 0x00000000);
546 nv_icmd(dev, 0x000489, 0x00000000);
547 nv_icmd(dev, 0x00048a, 0x00000000);
548 nv_icmd(dev, 0x00048b, 0x00000000);
549 nv_icmd(dev, 0x00048c, 0x00000000);
550 nv_icmd(dev, 0x00048d, 0x00000000);
551 nv_icmd(dev, 0x00048e, 0x00000000);
552 nv_icmd(dev, 0x00048f, 0x00000000);
553 nv_icmd(dev, 0x000490, 0x00000000);
554 nv_icmd(dev, 0x000491, 0x00000000);
555 nv_icmd(dev, 0x000492, 0x00000000);
556 nv_icmd(dev, 0x000493, 0x00000000);
557 nv_icmd(dev, 0x000494, 0x00000000);
558 nv_icmd(dev, 0x000495, 0x00000000);
559 nv_icmd(dev, 0x000496, 0x00000000);
560 nv_icmd(dev, 0x000497, 0x00000000);
561 nv_icmd(dev, 0x000498, 0x00000000);
562 nv_icmd(dev, 0x000499, 0x00000000);
563 nv_icmd(dev, 0x00049a, 0x00000000);
564 nv_icmd(dev, 0x00049b, 0x00000000);
565 nv_icmd(dev, 0x00049c, 0x00000000);
566 nv_icmd(dev, 0x00049d, 0x00000000);
567 nv_icmd(dev, 0x00049e, 0x00000000);
568 nv_icmd(dev, 0x00049f, 0x00000000);
569 nv_icmd(dev, 0x0004a0, 0x00000000);
570 nv_icmd(dev, 0x0004a1, 0x00000000);
571 nv_icmd(dev, 0x0004a2, 0x00000000);
572 nv_icmd(dev, 0x0004a3, 0x00000000);
573 nv_icmd(dev, 0x0004a4, 0x00000000);
574 nv_icmd(dev, 0x0004a5, 0x00000000);
575 nv_icmd(dev, 0x0004a6, 0x00000000);
576 nv_icmd(dev, 0x0004a7, 0x00000000);
577 nv_icmd(dev, 0x0004a8, 0x00000000);
578 nv_icmd(dev, 0x0004a9, 0x00000000);
579 nv_icmd(dev, 0x0004aa, 0x00000000);
580 nv_icmd(dev, 0x0004ab, 0x00000000);
581 nv_icmd(dev, 0x0004ac, 0x00000000);
582 nv_icmd(dev, 0x0004ad, 0x00000000);
583 nv_icmd(dev, 0x0004ae, 0x00000000);
584 nv_icmd(dev, 0x0004af, 0x00000000);
585 nv_icmd(dev, 0x0004b0, 0x00000000);
586 nv_icmd(dev, 0x0004b1, 0x00000000);
587 nv_icmd(dev, 0x0004b2, 0x00000000);
588 nv_icmd(dev, 0x0004b3, 0x00000000);
589 nv_icmd(dev, 0x0004b4, 0x00000000);
590 nv_icmd(dev, 0x0004b5, 0x00000000);
591 nv_icmd(dev, 0x0004b6, 0x00000000);
592 nv_icmd(dev, 0x0004b7, 0x00000000);
593 nv_icmd(dev, 0x0004b8, 0x00000000);
594 nv_icmd(dev, 0x0004b9, 0x00000000);
595 nv_icmd(dev, 0x0004ba, 0x00000000);
596 nv_icmd(dev, 0x0004bb, 0x00000000);
597 nv_icmd(dev, 0x0004bc, 0x00000000);
598 nv_icmd(dev, 0x0004bd, 0x00000000);
599 nv_icmd(dev, 0x0004be, 0x00000000);
600 nv_icmd(dev, 0x0004bf, 0x00000000);
601 nv_icmd(dev, 0x0004c0, 0x00000000);
602 nv_icmd(dev, 0x0004c1, 0x00000000);
603 nv_icmd(dev, 0x0004c2, 0x00000000);
604 nv_icmd(dev, 0x0004c3, 0x00000000);
605 nv_icmd(dev, 0x0004c4, 0x00000000);
606 nv_icmd(dev, 0x0004c5, 0x00000000);
607 nv_icmd(dev, 0x0004c6, 0x00000000);
608 nv_icmd(dev, 0x0004c7, 0x00000000);
609 nv_icmd(dev, 0x0004c8, 0x00000000);
610 nv_icmd(dev, 0x0004c9, 0x00000000);
611 nv_icmd(dev, 0x0004ca, 0x00000000);
612 nv_icmd(dev, 0x0004cb, 0x00000000);
613 nv_icmd(dev, 0x0004cc, 0x00000000);
614 nv_icmd(dev, 0x0004cd, 0x00000000);
615 nv_icmd(dev, 0x0004ce, 0x00000000);
616 nv_icmd(dev, 0x0004cf, 0x00000000);
617 nv_icmd(dev, 0x000510, 0x3f800000);
618 nv_icmd(dev, 0x000511, 0x3f800000);
619 nv_icmd(dev, 0x000512, 0x3f800000);
620 nv_icmd(dev, 0x000513, 0x3f800000);
621 nv_icmd(dev, 0x000514, 0x3f800000);
622 nv_icmd(dev, 0x000515, 0x3f800000);
623 nv_icmd(dev, 0x000516, 0x3f800000);
624 nv_icmd(dev, 0x000517, 0x3f800000);
625 nv_icmd(dev, 0x000518, 0x3f800000);
626 nv_icmd(dev, 0x000519, 0x3f800000);
627 nv_icmd(dev, 0x00051a, 0x3f800000);
628 nv_icmd(dev, 0x00051b, 0x3f800000);
629 nv_icmd(dev, 0x00051c, 0x3f800000);
630 nv_icmd(dev, 0x00051d, 0x3f800000);
631 nv_icmd(dev, 0x00051e, 0x3f800000);
632 nv_icmd(dev, 0x00051f, 0x3f800000);
633 nv_icmd(dev, 0x000520, 0x000002b6);
634 nv_icmd(dev, 0x000529, 0x00000001);
635 nv_icmd(dev, 0x000530, 0xffff0000);
636 nv_icmd(dev, 0x000531, 0xffff0000);
637 nv_icmd(dev, 0x000532, 0xffff0000);
638 nv_icmd(dev, 0x000533, 0xffff0000);
639 nv_icmd(dev, 0x000534, 0xffff0000);
640 nv_icmd(dev, 0x000535, 0xffff0000);
641 nv_icmd(dev, 0x000536, 0xffff0000);
642 nv_icmd(dev, 0x000537, 0xffff0000);
643 nv_icmd(dev, 0x000538, 0xffff0000);
644 nv_icmd(dev, 0x000539, 0xffff0000);
645 nv_icmd(dev, 0x00053a, 0xffff0000);
646 nv_icmd(dev, 0x00053b, 0xffff0000);
647 nv_icmd(dev, 0x00053c, 0xffff0000);
648 nv_icmd(dev, 0x00053d, 0xffff0000);
649 nv_icmd(dev, 0x00053e, 0xffff0000);
650 nv_icmd(dev, 0x00053f, 0xffff0000);
651 nv_icmd(dev, 0x000585, 0x0000003f);
652 nv_icmd(dev, 0x000576, 0x00000003);
653 nv_icmd(dev, 0x00057b, 0x00000059);
654 nv_icmd(dev, 0x000586, 0x00000040);
655 nv_icmd(dev, 0x000582, 0x00000080);
656 nv_icmd(dev, 0x000583, 0x00000080);
657 nv_icmd(dev, 0x0005c2, 0x00000001);
658 nv_icmd(dev, 0x000638, 0x00000001);
659 nv_icmd(dev, 0x000639, 0x00000001);
660 nv_icmd(dev, 0x00063a, 0x00000002);
661 nv_icmd(dev, 0x00063b, 0x00000001);
662 nv_icmd(dev, 0x00063c, 0x00000001);
663 nv_icmd(dev, 0x00063d, 0x00000002);
664 nv_icmd(dev, 0x00063e, 0x00000001);
665 nv_icmd(dev, 0x0008b8, 0x00000001);
666 nv_icmd(dev, 0x0008b9, 0x00000001);
667 nv_icmd(dev, 0x0008ba, 0x00000001);
668 nv_icmd(dev, 0x0008bb, 0x00000001);
669 nv_icmd(dev, 0x0008bc, 0x00000001);
670 nv_icmd(dev, 0x0008bd, 0x00000001);
671 nv_icmd(dev, 0x0008be, 0x00000001);
672 nv_icmd(dev, 0x0008bf, 0x00000001);
673 nv_icmd(dev, 0x000900, 0x00000001);
674 nv_icmd(dev, 0x000901, 0x00000001);
675 nv_icmd(dev, 0x000902, 0x00000001);
676 nv_icmd(dev, 0x000903, 0x00000001);
677 nv_icmd(dev, 0x000904, 0x00000001);
678 nv_icmd(dev, 0x000905, 0x00000001);
679 nv_icmd(dev, 0x000906, 0x00000001);
680 nv_icmd(dev, 0x000907, 0x00000001);
681 nv_icmd(dev, 0x000908, 0x00000002);
682 nv_icmd(dev, 0x000909, 0x00000002);
683 nv_icmd(dev, 0x00090a, 0x00000002);
684 nv_icmd(dev, 0x00090b, 0x00000002);
685 nv_icmd(dev, 0x00090c, 0x00000002);
686 nv_icmd(dev, 0x00090d, 0x00000002);
687 nv_icmd(dev, 0x00090e, 0x00000002);
688 nv_icmd(dev, 0x00090f, 0x00000002);
689 nv_icmd(dev, 0x000910, 0x00000001);
690 nv_icmd(dev, 0x000911, 0x00000001);
691 nv_icmd(dev, 0x000912, 0x00000001);
692 nv_icmd(dev, 0x000913, 0x00000001);
693 nv_icmd(dev, 0x000914, 0x00000001);
694 nv_icmd(dev, 0x000915, 0x00000001);
695 nv_icmd(dev, 0x000916, 0x00000001);
696 nv_icmd(dev, 0x000917, 0x00000001);
697 nv_icmd(dev, 0x000918, 0x00000001);
698 nv_icmd(dev, 0x000919, 0x00000001);
699 nv_icmd(dev, 0x00091a, 0x00000001);
700 nv_icmd(dev, 0x00091b, 0x00000001);
701 nv_icmd(dev, 0x00091c, 0x00000001);
702 nv_icmd(dev, 0x00091d, 0x00000001);
703 nv_icmd(dev, 0x00091e, 0x00000001);
704 nv_icmd(dev, 0x00091f, 0x00000001);
705 nv_icmd(dev, 0x000920, 0x00000002);
706 nv_icmd(dev, 0x000921, 0x00000002);
707 nv_icmd(dev, 0x000922, 0x00000002);
708 nv_icmd(dev, 0x000923, 0x00000002);
709 nv_icmd(dev, 0x000924, 0x00000002);
710 nv_icmd(dev, 0x000925, 0x00000002);
711 nv_icmd(dev, 0x000926, 0x00000002);
712 nv_icmd(dev, 0x000927, 0x00000002);
713 nv_icmd(dev, 0x000928, 0x00000001);
714 nv_icmd(dev, 0x000929, 0x00000001);
715 nv_icmd(dev, 0x00092a, 0x00000001);
716 nv_icmd(dev, 0x00092b, 0x00000001);
717 nv_icmd(dev, 0x00092c, 0x00000001);
718 nv_icmd(dev, 0x00092d, 0x00000001);
719 nv_icmd(dev, 0x00092e, 0x00000001);
720 nv_icmd(dev, 0x00092f, 0x00000001);
721 nv_icmd(dev, 0x000648, 0x00000001);
722 nv_icmd(dev, 0x000649, 0x00000001);
723 nv_icmd(dev, 0x00064a, 0x00000001);
724 nv_icmd(dev, 0x00064b, 0x00000001);
725 nv_icmd(dev, 0x00064c, 0x00000001);
726 nv_icmd(dev, 0x00064d, 0x00000001);
727 nv_icmd(dev, 0x00064e, 0x00000001);
728 nv_icmd(dev, 0x00064f, 0x00000001);
729 nv_icmd(dev, 0x000650, 0x00000001);
730 nv_icmd(dev, 0x000658, 0x0000000f);
731 nv_icmd(dev, 0x0007ff, 0x0000000a);
732 nv_icmd(dev, 0x00066a, 0x40000000);
733 nv_icmd(dev, 0x00066b, 0x10000000);
734 nv_icmd(dev, 0x00066c, 0xffff0000);
735 nv_icmd(dev, 0x00066d, 0xffff0000);
736 nv_icmd(dev, 0x0007af, 0x00000008);
737 nv_icmd(dev, 0x0007b0, 0x00000008);
738 nv_icmd(dev, 0x0007f6, 0x00000001);
739 nv_icmd(dev, 0x0006b2, 0x00000055);
740 nv_icmd(dev, 0x0007ad, 0x00000003);
741 nv_icmd(dev, 0x000937, 0x00000001);
742 nv_icmd(dev, 0x000971, 0x00000008);
743 nv_icmd(dev, 0x000972, 0x00000040);
744 nv_icmd(dev, 0x000973, 0x0000012c);
745 nv_icmd(dev, 0x00097c, 0x00000040);
746 nv_icmd(dev, 0x000979, 0x00000003);
747 nv_icmd(dev, 0x000975, 0x00000020);
748 nv_icmd(dev, 0x000976, 0x00000001);
749 nv_icmd(dev, 0x000977, 0x00000020);
750 nv_icmd(dev, 0x000978, 0x00000001);
751 nv_icmd(dev, 0x000957, 0x00000003);
752 nv_icmd(dev, 0x00095e, 0x20164010);
753 nv_icmd(dev, 0x00095f, 0x00000020);
754 nv_icmd(dev, 0x00097d, 0x00000020);
755 nv_icmd(dev, 0x000683, 0x00000006);
756 nv_icmd(dev, 0x000685, 0x003fffff);
757 nv_icmd(dev, 0x000687, 0x003fffff);
758 nv_icmd(dev, 0x0006a0, 0x00000005);
759 nv_icmd(dev, 0x000840, 0x00400008);
760 nv_icmd(dev, 0x000841, 0x08000080);
761 nv_icmd(dev, 0x000842, 0x00400008);
762 nv_icmd(dev, 0x000843, 0x08000080);
763 nv_icmd(dev, 0x000818, 0x00000000);
764 nv_icmd(dev, 0x000819, 0x00000000);
765 nv_icmd(dev, 0x00081a, 0x00000000);
766 nv_icmd(dev, 0x00081b, 0x00000000);
767 nv_icmd(dev, 0x00081c, 0x00000000);
768 nv_icmd(dev, 0x00081d, 0x00000000);
769 nv_icmd(dev, 0x00081e, 0x00000000);
770 nv_icmd(dev, 0x00081f, 0x00000000);
771 nv_icmd(dev, 0x000848, 0x00000000);
772 nv_icmd(dev, 0x000849, 0x00000000);
773 nv_icmd(dev, 0x00084a, 0x00000000);
774 nv_icmd(dev, 0x00084b, 0x00000000);
775 nv_icmd(dev, 0x00084c, 0x00000000);
776 nv_icmd(dev, 0x00084d, 0x00000000);
777 nv_icmd(dev, 0x00084e, 0x00000000);
778 nv_icmd(dev, 0x00084f, 0x00000000);
779 nv_icmd(dev, 0x000850, 0x00000000);
780 nv_icmd(dev, 0x000851, 0x00000000);
781 nv_icmd(dev, 0x000852, 0x00000000);
782 nv_icmd(dev, 0x000853, 0x00000000);
783 nv_icmd(dev, 0x000854, 0x00000000);
784 nv_icmd(dev, 0x000855, 0x00000000);
785 nv_icmd(dev, 0x000856, 0x00000000);
786 nv_icmd(dev, 0x000857, 0x00000000);
787 nv_icmd(dev, 0x000738, 0x00000000);
788 nv_icmd(dev, 0x0006aa, 0x00000001);
789 nv_icmd(dev, 0x0006ab, 0x00000002);
790 nv_icmd(dev, 0x0006ac, 0x00000080);
791 nv_icmd(dev, 0x0006ad, 0x00000100);
792 nv_icmd(dev, 0x0006ae, 0x00000100);
793 nv_icmd(dev, 0x0006b1, 0x00000011);
794 nv_icmd(dev, 0x0006bb, 0x000000cf);
795 nv_icmd(dev, 0x0006ce, 0x2a712488);
796 nv_icmd(dev, 0x000739, 0x4085c000);
797 nv_icmd(dev, 0x00073a, 0x00000080);
798 nv_icmd(dev, 0x000786, 0x80000100);
799 nv_icmd(dev, 0x00073c, 0x00010100);
800 nv_icmd(dev, 0x00073d, 0x02800000);
801 nv_icmd(dev, 0x000787, 0x000000cf);
802 nv_icmd(dev, 0x00078c, 0x00000008);
803 nv_icmd(dev, 0x000792, 0x00000001);
804 nv_icmd(dev, 0x000794, 0x00000001);
805 nv_icmd(dev, 0x000795, 0x00000001);
806 nv_icmd(dev, 0x000796, 0x00000001);
807 nv_icmd(dev, 0x000797, 0x000000cf);
808 nv_icmd(dev, 0x000836, 0x00000001);
809 nv_icmd(dev, 0x00079a, 0x00000002);
810 nv_icmd(dev, 0x000833, 0x04444480);
811 nv_icmd(dev, 0x0007a1, 0x00000001);
812 nv_icmd(dev, 0x0007a3, 0x00000001);
813 nv_icmd(dev, 0x0007a4, 0x00000001);
814 nv_icmd(dev, 0x0007a5, 0x00000001);
815 nv_icmd(dev, 0x000831, 0x00000004);
816 nv_icmd(dev, 0x000b07, 0x00000002);
817 nv_icmd(dev, 0x000b08, 0x00000100);
818 nv_icmd(dev, 0x000b09, 0x00000100);
819 nv_icmd(dev, 0x000b0a, 0x00000001);
820 nv_icmd(dev, 0x000a04, 0x000000ff);
821 nv_icmd(dev, 0x000a0b, 0x00000040);
822 nv_icmd(dev, 0x00097f, 0x00000100);
823 nv_icmd(dev, 0x000a02, 0x00000001);
824 nv_icmd(dev, 0x000809, 0x00000007);
825 nv_icmd(dev, 0x00c221, 0x00000040);
826 nv_icmd(dev, 0x00c1b0, 0x0000000f);
827 nv_icmd(dev, 0x00c1b1, 0x0000000f);
828 nv_icmd(dev, 0x00c1b2, 0x0000000f);
829 nv_icmd(dev, 0x00c1b3, 0x0000000f);
830 nv_icmd(dev, 0x00c1b4, 0x0000000f);
831 nv_icmd(dev, 0x00c1b5, 0x0000000f);
832 nv_icmd(dev, 0x00c1b6, 0x0000000f);
833 nv_icmd(dev, 0x00c1b7, 0x0000000f);
834 nv_icmd(dev, 0x00c1b8, 0x0fac6881);
835 nv_icmd(dev, 0x00c1b9, 0x00fac688);
836 nv_icmd(dev, 0x00c401, 0x00000001);
837 nv_icmd(dev, 0x00c402, 0x00010001);
838 nv_icmd(dev, 0x00c403, 0x00000001);
839 nv_icmd(dev, 0x00c404, 0x00000001);
840 nv_icmd(dev, 0x00c40e, 0x00000020);
841 nv_icmd(dev, 0x00c500, 0x00000003);
842 nv_icmd(dev, 0x01e100, 0x00000001);
843 nv_icmd(dev, 0x001000, 0x00000002);
844 nv_icmd(dev, 0x0006aa, 0x00000001);
845 nv_icmd(dev, 0x0006ad, 0x00000100);
846 nv_icmd(dev, 0x0006ae, 0x00000100);
847 nv_icmd(dev, 0x0006b1, 0x00000011);
848 nv_icmd(dev, 0x00078c, 0x00000008);
849 nv_icmd(dev, 0x000792, 0x00000001);
850 nv_icmd(dev, 0x000794, 0x00000001);
851 nv_icmd(dev, 0x000795, 0x00000001);
852 nv_icmd(dev, 0x000796, 0x00000001);
853 nv_icmd(dev, 0x000797, 0x000000cf);
854 nv_icmd(dev, 0x00079a, 0x00000002);
855 nv_icmd(dev, 0x000833, 0x04444480);
856 nv_icmd(dev, 0x0007a1, 0x00000001);
857 nv_icmd(dev, 0x0007a3, 0x00000001);
858 nv_icmd(dev, 0x0007a4, 0x00000001);
859 nv_icmd(dev, 0x0007a5, 0x00000001);
860 nv_icmd(dev, 0x000831, 0x00000004);
861 nv_icmd(dev, 0x01e100, 0x00000001);
862 nv_icmd(dev, 0x001000, 0x00000008);
863 nv_icmd(dev, 0x000039, 0x00000000);
864 nv_icmd(dev, 0x00003a, 0x00000000);
865 nv_icmd(dev, 0x00003b, 0x00000000);
866 nv_icmd(dev, 0x000380, 0x00000001);
867 nv_icmd(dev, 0x000366, 0x00000000);
868 nv_icmd(dev, 0x000367, 0x00000000);
869 nv_icmd(dev, 0x000368, 0x00000fff);
870 nv_icmd(dev, 0x000370, 0x00000000);
871 nv_icmd(dev, 0x000371, 0x00000000);
872 nv_icmd(dev, 0x000372, 0x000fffff);
873 nv_icmd(dev, 0x000813, 0x00000006);
874 nv_icmd(dev, 0x000814, 0x00000008);
875 nv_icmd(dev, 0x000957, 0x00000003);
876 nv_icmd(dev, 0x000818, 0x00000000);
877 nv_icmd(dev, 0x000819, 0x00000000);
878 nv_icmd(dev, 0x00081a, 0x00000000);
879 nv_icmd(dev, 0x00081b, 0x00000000);
880 nv_icmd(dev, 0x00081c, 0x00000000);
881 nv_icmd(dev, 0x00081d, 0x00000000);
882 nv_icmd(dev, 0x00081e, 0x00000000);
883 nv_icmd(dev, 0x00081f, 0x00000000);
884 nv_icmd(dev, 0x000848, 0x00000000);
885 nv_icmd(dev, 0x000849, 0x00000000);
886 nv_icmd(dev, 0x00084a, 0x00000000);
887 nv_icmd(dev, 0x00084b, 0x00000000);
888 nv_icmd(dev, 0x00084c, 0x00000000);
889 nv_icmd(dev, 0x00084d, 0x00000000);
890 nv_icmd(dev, 0x00084e, 0x00000000);
891 nv_icmd(dev, 0x00084f, 0x00000000);
892 nv_icmd(dev, 0x000850, 0x00000000);
893 nv_icmd(dev, 0x000851, 0x00000000);
894 nv_icmd(dev, 0x000852, 0x00000000);
895 nv_icmd(dev, 0x000853, 0x00000000);
896 nv_icmd(dev, 0x000854, 0x00000000);
897 nv_icmd(dev, 0x000855, 0x00000000);
898 nv_icmd(dev, 0x000856, 0x00000000);
899 nv_icmd(dev, 0x000857, 0x00000000);
900 nv_icmd(dev, 0x000738, 0x00000000);
901 nv_icmd(dev, 0x000b07, 0x00000002);
902 nv_icmd(dev, 0x000b08, 0x00000100);
903 nv_icmd(dev, 0x000b09, 0x00000100);
904 nv_icmd(dev, 0x000b0a, 0x00000001);
905 nv_icmd(dev, 0x000a04, 0x000000ff);
906 nv_icmd(dev, 0x00097f, 0x00000100);
907 nv_icmd(dev, 0x000a02, 0x00000001);
908 nv_icmd(dev, 0x000809, 0x00000007);
909 nv_icmd(dev, 0x00c221, 0x00000040);
910 nv_icmd(dev, 0x00c401, 0x00000001);
911 nv_icmd(dev, 0x00c402, 0x00010001);
912 nv_icmd(dev, 0x00c403, 0x00000001);
913 nv_icmd(dev, 0x00c404, 0x00000001);
914 nv_icmd(dev, 0x00c40e, 0x00000020);
915 nv_icmd(dev, 0x00c500, 0x00000003);
916 nv_icmd(dev, 0x01e100, 0x00000001);
917 nv_icmd(dev, 0x001000, 0x00000001);
918 nv_icmd(dev, 0x000b07, 0x00000002);
919 nv_icmd(dev, 0x000b08, 0x00000100);
920 nv_icmd(dev, 0x000b09, 0x00000100);
921 nv_icmd(dev, 0x000b0a, 0x00000001);
922 nv_icmd(dev, 0x01e100, 0x00000001);
923 nv_wr32(dev, 0x400208, 0x00000000);
924}
925
926static void
927nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
928{
929 nv_wr32(dev, 0x40448c, data);
930 nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
931}
932
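nv_mthd() injects a single method through the FE: the data word goes to 0x40448c and the trigger word to 0x404488, packing bit 31 as the kick, the method offset at bit 14 and the class in the low bits, so class 0xa097 method 0x0800 encodes as 0x80000000 | (0x0800 << 14) | 0xa097 = 0x8200a097. An encoding check:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t mthd_word(uint32_t class, uint32_t mthd)
	{
		return 0x80000000 | (mthd << 14) | class;
	}

	int main(void)
	{
		/* class 0xa097, method 0x0800 -> 0x8200a097 */
		printf("0x%08x\n", (unsigned)mthd_word(0xa097, 0x0800));
		return 0;
	}
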
933static void
934nve0_grctx_generate_a097(struct drm_device *dev)
935{
936 nv_mthd(dev, 0xa097, 0x0800, 0x00000000);
937 nv_mthd(dev, 0xa097, 0x0840, 0x00000000);
938 nv_mthd(dev, 0xa097, 0x0880, 0x00000000);
939 nv_mthd(dev, 0xa097, 0x08c0, 0x00000000);
940 nv_mthd(dev, 0xa097, 0x0900, 0x00000000);
941 nv_mthd(dev, 0xa097, 0x0940, 0x00000000);
942 nv_mthd(dev, 0xa097, 0x0980, 0x00000000);
943 nv_mthd(dev, 0xa097, 0x09c0, 0x00000000);
944 nv_mthd(dev, 0xa097, 0x0804, 0x00000000);
945 nv_mthd(dev, 0xa097, 0x0844, 0x00000000);
946 nv_mthd(dev, 0xa097, 0x0884, 0x00000000);
947 nv_mthd(dev, 0xa097, 0x08c4, 0x00000000);
948 nv_mthd(dev, 0xa097, 0x0904, 0x00000000);
949 nv_mthd(dev, 0xa097, 0x0944, 0x00000000);
950 nv_mthd(dev, 0xa097, 0x0984, 0x00000000);
951 nv_mthd(dev, 0xa097, 0x09c4, 0x00000000);
952 nv_mthd(dev, 0xa097, 0x0808, 0x00000400);
953 nv_mthd(dev, 0xa097, 0x0848, 0x00000400);
954 nv_mthd(dev, 0xa097, 0x0888, 0x00000400);
955 nv_mthd(dev, 0xa097, 0x08c8, 0x00000400);
956 nv_mthd(dev, 0xa097, 0x0908, 0x00000400);
957 nv_mthd(dev, 0xa097, 0x0948, 0x00000400);
958 nv_mthd(dev, 0xa097, 0x0988, 0x00000400);
959 nv_mthd(dev, 0xa097, 0x09c8, 0x00000400);
960 nv_mthd(dev, 0xa097, 0x080c, 0x00000300);
961 nv_mthd(dev, 0xa097, 0x084c, 0x00000300);
962 nv_mthd(dev, 0xa097, 0x088c, 0x00000300);
963 nv_mthd(dev, 0xa097, 0x08cc, 0x00000300);
964 nv_mthd(dev, 0xa097, 0x090c, 0x00000300);
965 nv_mthd(dev, 0xa097, 0x094c, 0x00000300);
966 nv_mthd(dev, 0xa097, 0x098c, 0x00000300);
967 nv_mthd(dev, 0xa097, 0x09cc, 0x00000300);
968 nv_mthd(dev, 0xa097, 0x0810, 0x000000cf);
969 nv_mthd(dev, 0xa097, 0x0850, 0x00000000);
970 nv_mthd(dev, 0xa097, 0x0890, 0x00000000);
971 nv_mthd(dev, 0xa097, 0x08d0, 0x00000000);
972 nv_mthd(dev, 0xa097, 0x0910, 0x00000000);
973 nv_mthd(dev, 0xa097, 0x0950, 0x00000000);
974 nv_mthd(dev, 0xa097, 0x0990, 0x00000000);
975 nv_mthd(dev, 0xa097, 0x09d0, 0x00000000);
976 nv_mthd(dev, 0xa097, 0x0814, 0x00000040);
977 nv_mthd(dev, 0xa097, 0x0854, 0x00000040);
978 nv_mthd(dev, 0xa097, 0x0894, 0x00000040);
979 nv_mthd(dev, 0xa097, 0x08d4, 0x00000040);
980 nv_mthd(dev, 0xa097, 0x0914, 0x00000040);
981 nv_mthd(dev, 0xa097, 0x0954, 0x00000040);
982 nv_mthd(dev, 0xa097, 0x0994, 0x00000040);
983 nv_mthd(dev, 0xa097, 0x09d4, 0x00000040);
984 nv_mthd(dev, 0xa097, 0x0818, 0x00000001);
985 nv_mthd(dev, 0xa097, 0x0858, 0x00000001);
986 nv_mthd(dev, 0xa097, 0x0898, 0x00000001);
987 nv_mthd(dev, 0xa097, 0x08d8, 0x00000001);
988 nv_mthd(dev, 0xa097, 0x0918, 0x00000001);
989 nv_mthd(dev, 0xa097, 0x0958, 0x00000001);
990 nv_mthd(dev, 0xa097, 0x0998, 0x00000001);
991 nv_mthd(dev, 0xa097, 0x09d8, 0x00000001);
992 nv_mthd(dev, 0xa097, 0x081c, 0x00000000);
993 nv_mthd(dev, 0xa097, 0x085c, 0x00000000);
994 nv_mthd(dev, 0xa097, 0x089c, 0x00000000);
995 nv_mthd(dev, 0xa097, 0x08dc, 0x00000000);
996 nv_mthd(dev, 0xa097, 0x091c, 0x00000000);
997 nv_mthd(dev, 0xa097, 0x095c, 0x00000000);
998 nv_mthd(dev, 0xa097, 0x099c, 0x00000000);
999 nv_mthd(dev, 0xa097, 0x09dc, 0x00000000);
1000 nv_mthd(dev, 0xa097, 0x0820, 0x00000000);
1001 nv_mthd(dev, 0xa097, 0x0860, 0x00000000);
1002 nv_mthd(dev, 0xa097, 0x08a0, 0x00000000);
1003 nv_mthd(dev, 0xa097, 0x08e0, 0x00000000);
1004 nv_mthd(dev, 0xa097, 0x0920, 0x00000000);
1005 nv_mthd(dev, 0xa097, 0x0960, 0x00000000);
1006 nv_mthd(dev, 0xa097, 0x09a0, 0x00000000);
1007 nv_mthd(dev, 0xa097, 0x09e0, 0x00000000);
1008 nv_mthd(dev, 0xa097, 0x1c00, 0x00000000);
1009 nv_mthd(dev, 0xa097, 0x1c10, 0x00000000);
1010 nv_mthd(dev, 0xa097, 0x1c20, 0x00000000);
1011 nv_mthd(dev, 0xa097, 0x1c30, 0x00000000);
1012 nv_mthd(dev, 0xa097, 0x1c40, 0x00000000);
1013 nv_mthd(dev, 0xa097, 0x1c50, 0x00000000);
1014 nv_mthd(dev, 0xa097, 0x1c60, 0x00000000);
1015 nv_mthd(dev, 0xa097, 0x1c70, 0x00000000);
1016 nv_mthd(dev, 0xa097, 0x1c80, 0x00000000);
1017 nv_mthd(dev, 0xa097, 0x1c90, 0x00000000);
1018 nv_mthd(dev, 0xa097, 0x1ca0, 0x00000000);
1019 nv_mthd(dev, 0xa097, 0x1cb0, 0x00000000);
1020 nv_mthd(dev, 0xa097, 0x1cc0, 0x00000000);
1021 nv_mthd(dev, 0xa097, 0x1cd0, 0x00000000);
1022 nv_mthd(dev, 0xa097, 0x1ce0, 0x00000000);
1023 nv_mthd(dev, 0xa097, 0x1cf0, 0x00000000);
1024 nv_mthd(dev, 0xa097, 0x1c04, 0x00000000);
1025 nv_mthd(dev, 0xa097, 0x1c14, 0x00000000);
1026 nv_mthd(dev, 0xa097, 0x1c24, 0x00000000);
1027 nv_mthd(dev, 0xa097, 0x1c34, 0x00000000);
1028 nv_mthd(dev, 0xa097, 0x1c44, 0x00000000);
1029 nv_mthd(dev, 0xa097, 0x1c54, 0x00000000);
1030 nv_mthd(dev, 0xa097, 0x1c64, 0x00000000);
1031 nv_mthd(dev, 0xa097, 0x1c74, 0x00000000);
1032 nv_mthd(dev, 0xa097, 0x1c84, 0x00000000);
1033 nv_mthd(dev, 0xa097, 0x1c94, 0x00000000);
1034 nv_mthd(dev, 0xa097, 0x1ca4, 0x00000000);
1035 nv_mthd(dev, 0xa097, 0x1cb4, 0x00000000);
1036 nv_mthd(dev, 0xa097, 0x1cc4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1cd4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1ce4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1cf4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c08, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c18, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c28, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c38, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c48, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c58, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c68, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c78, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c88, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c98, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1ca8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1cb8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1cc8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1cd8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1ce8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1cf8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c0c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c1c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c2c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c3c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c4c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c5c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c6c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c7c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c8c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1c9c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1cac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1cbc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1ccc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1cdc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1cec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1cfc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d00, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d10, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d20, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d30, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d40, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d50, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d60, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d70, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d80, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d90, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1da0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1db0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1dc0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1dd0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1de0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1df0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d04, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d14, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d24, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d34, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d44, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d54, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d64, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d74, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d84, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d94, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1da4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1db4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1dc4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1dd4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1de4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1df4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d08, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d18, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d28, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d38, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d48, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d58, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d68, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d78, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d88, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d98, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1da8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1db8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1dc8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1dd8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1de8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1df8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d0c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d1c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d2c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d3c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d4c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d5c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d6c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d7c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d8c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1d9c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1dac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1dbc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1dcc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1ddc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1dec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1dfc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f00, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f08, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f10, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f18, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f20, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f28, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f30, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f38, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f40, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f48, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f50, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f58, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f60, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f68, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f70, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f78, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f04, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f0c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f14, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f1c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f24, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f2c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f34, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f3c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f44, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f4c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f54, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f5c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f64, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f6c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f74, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f7c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f80, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f88, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f90, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f98, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fa0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fa8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fb0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fb8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fc0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fc8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fd0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fd8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fe0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fe8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1ff0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1ff8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f84, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f8c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f94, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1f9c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fa4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fb4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fbc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fc4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fcc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fd4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fdc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fe4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1fec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1ff4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1ffc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2000, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2040, 0x00000011);
	nv_mthd(dev, 0xa097, 0x2080, 0x00000020);
	nv_mthd(dev, 0xa097, 0x20c0, 0x00000030);
	nv_mthd(dev, 0xa097, 0x2100, 0x00000040);
	nv_mthd(dev, 0xa097, 0x2140, 0x00000051);
	nv_mthd(dev, 0xa097, 0x200c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x204c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x208c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x20cc, 0x00000001);
	nv_mthd(dev, 0xa097, 0x210c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x214c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x2010, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2050, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2090, 0x00000001);
	nv_mthd(dev, 0xa097, 0x20d0, 0x00000002);
	nv_mthd(dev, 0xa097, 0x2110, 0x00000003);
	nv_mthd(dev, 0xa097, 0x2150, 0x00000004);
	nv_mthd(dev, 0xa097, 0x0380, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03a0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03e0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0384, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03c4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0388, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03c8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x038c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x03ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0700, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0710, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0720, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0730, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0704, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0714, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0724, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0734, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0708, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0718, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0728, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0738, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2800, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2804, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2808, 0x00000000);
	nv_mthd(dev, 0xa097, 0x280c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2810, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2814, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2818, 0x00000000);
	nv_mthd(dev, 0xa097, 0x281c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2820, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2824, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2828, 0x00000000);
	nv_mthd(dev, 0xa097, 0x282c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2830, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2834, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2838, 0x00000000);
	nv_mthd(dev, 0xa097, 0x283c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2840, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2844, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2848, 0x00000000);
	nv_mthd(dev, 0xa097, 0x284c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2850, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2854, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2858, 0x00000000);
	nv_mthd(dev, 0xa097, 0x285c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2860, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2864, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2868, 0x00000000);
	nv_mthd(dev, 0xa097, 0x286c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2870, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2874, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2878, 0x00000000);
	nv_mthd(dev, 0xa097, 0x287c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2880, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2884, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2888, 0x00000000);
	nv_mthd(dev, 0xa097, 0x288c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2890, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2894, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2898, 0x00000000);
	nv_mthd(dev, 0xa097, 0x289c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28a0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28b0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28b4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28b8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28c4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28c8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28d0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28d4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28d8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28dc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28e0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28f0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28f4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28f8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x28fc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2900, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2904, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2908, 0x00000000);
	nv_mthd(dev, 0xa097, 0x290c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2910, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2914, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2918, 0x00000000);
	nv_mthd(dev, 0xa097, 0x291c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2920, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2924, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2928, 0x00000000);
	nv_mthd(dev, 0xa097, 0x292c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2930, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2934, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2938, 0x00000000);
	nv_mthd(dev, 0xa097, 0x293c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2940, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2944, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2948, 0x00000000);
	nv_mthd(dev, 0xa097, 0x294c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2950, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2954, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2958, 0x00000000);
	nv_mthd(dev, 0xa097, 0x295c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2960, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2964, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2968, 0x00000000);
	nv_mthd(dev, 0xa097, 0x296c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2970, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2974, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2978, 0x00000000);
	nv_mthd(dev, 0xa097, 0x297c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2980, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2984, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2988, 0x00000000);
	nv_mthd(dev, 0xa097, 0x298c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2990, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2994, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2998, 0x00000000);
	nv_mthd(dev, 0xa097, 0x299c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29a0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29b0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29b4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29b8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29c4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29c8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29d0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29d4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29d8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29dc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29e0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29f0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29f4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29f8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x29fc, 0x00000000);
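	/*
	 * 0x2800..0x29fc above is one contiguous run of zero defaults for
	 * class 0xa097 (the Kepler 3D class); a hypothetical loop form
	 * (sketch only, "mthd" is an assumed local) would be:
	 *
	 *	for (mthd = 0x2800; mthd <= 0x29fc; mthd += 4)
	 *		nv_mthd(dev, 0xa097, mthd, 0x00000000);
	 *
	 * The flat table is kept, as it appears to mirror a captured
	 * hardware state dump.
	 */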
	nv_mthd(dev, 0xa097, 0x0a00, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a20, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a40, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a60, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a80, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0aa0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ac0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ae0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b00, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b20, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b40, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b60, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b80, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ba0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bc0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0be0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a04, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a24, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a44, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a64, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a84, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0aa4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ac4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ae4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b04, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b24, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b44, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b64, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b84, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ba4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bc4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0be4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a08, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a28, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a48, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a68, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a88, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0aa8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ac8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ae8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b08, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b28, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b48, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b68, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b88, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ba8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bc8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0be8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a0c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a2c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a4c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a6c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a8c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0aac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0acc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0aec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b0c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b2c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b4c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b6c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b8c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bcc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a10, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a30, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a50, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a70, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a90, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ab0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ad0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0af0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b10, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b30, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b50, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b70, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b90, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bb0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bd0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bf0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a14, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a34, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a54, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a74, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0a94, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ab4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ad4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0af4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b14, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b34, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b54, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b74, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0b94, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bb4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bd4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0bf4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c00, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c10, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c20, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c30, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c40, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c50, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c60, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c70, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c80, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c90, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ca0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cb0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cc0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cd0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ce0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cf0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c04, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c14, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c24, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c34, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c44, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c54, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c64, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c74, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c84, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c94, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ca4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cb4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cc4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cd4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ce4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cf4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c08, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c18, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c28, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c38, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c48, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c58, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c68, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c78, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c88, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0c98, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ca8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cb8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cc8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cd8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ce8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0cf8, 0x00000000);
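	/* 0x0c0c..0x0cfc default to 0x3f800000, the IEEE-754 encoding of 1.0f. */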
	nv_mthd(dev, 0xa097, 0x0c0c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c1c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c2c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c3c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c4c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c5c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c6c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c7c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c8c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0c9c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0cac, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0cbc, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0ccc, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0cdc, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0cec, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0cfc, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0d00, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d08, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d10, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d18, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d20, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d28, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d30, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d38, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d04, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d0c, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d14, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d1c, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d24, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d2c, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d34, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d3c, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e00, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e10, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e20, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e30, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e40, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e50, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e60, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e70, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e80, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e90, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ea0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0eb0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ec0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ed0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ee0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ef0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0e04, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e14, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e24, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e34, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e44, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e54, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e64, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e74, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e84, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e94, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ea4, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0eb4, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ec4, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ed4, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ee4, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ef4, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e08, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e18, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e28, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e38, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e48, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e58, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e68, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e78, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e88, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0e98, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ea8, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0eb8, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ec8, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ed8, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ee8, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0ef8, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d40, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d48, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d50, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d58, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d44, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d4c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d54, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d5c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1e00, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e20, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e40, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e60, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e80, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ea0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ec0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ee0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e04, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e24, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e44, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e64, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e84, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ea4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ec4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ee4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e08, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e28, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e48, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e68, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e88, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1ea8, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1ec8, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1ee8, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e0c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e2c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e4c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e6c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e8c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1eac, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ecc, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1eec, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e10, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e30, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e50, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e70, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e90, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1eb0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ed0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ef0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e14, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e34, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e54, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e74, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e94, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1eb4, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1ed4, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1ef4, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1e18, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e38, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e58, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e78, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1e98, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1eb8, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ed8, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1ef8, 0x00000001);
	nv_mthd(dev, 0xa097, 0x3400, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3404, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3408, 0x00000000);
	nv_mthd(dev, 0xa097, 0x340c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3410, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3414, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3418, 0x00000000);
	nv_mthd(dev, 0xa097, 0x341c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3420, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3424, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3428, 0x00000000);
	nv_mthd(dev, 0xa097, 0x342c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3430, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3434, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3438, 0x00000000);
	nv_mthd(dev, 0xa097, 0x343c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3440, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3444, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3448, 0x00000000);
	nv_mthd(dev, 0xa097, 0x344c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3450, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3454, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3458, 0x00000000);
	nv_mthd(dev, 0xa097, 0x345c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3460, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3464, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3468, 0x00000000);
	nv_mthd(dev, 0xa097, 0x346c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3470, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3474, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3478, 0x00000000);
	nv_mthd(dev, 0xa097, 0x347c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3480, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3484, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3488, 0x00000000);
	nv_mthd(dev, 0xa097, 0x348c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3490, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3494, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3498, 0x00000000);
	nv_mthd(dev, 0xa097, 0x349c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34a0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34b0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34b4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34b8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34c4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34c8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34d0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34d4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34d8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34dc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34e0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34f0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34f4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34f8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x34fc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3500, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3504, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3508, 0x00000000);
	nv_mthd(dev, 0xa097, 0x350c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3510, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3514, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3518, 0x00000000);
	nv_mthd(dev, 0xa097, 0x351c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3520, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3524, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3528, 0x00000000);
	nv_mthd(dev, 0xa097, 0x352c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3530, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3534, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3538, 0x00000000);
	nv_mthd(dev, 0xa097, 0x353c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3540, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3544, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3548, 0x00000000);
	nv_mthd(dev, 0xa097, 0x354c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3550, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3554, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3558, 0x00000000);
	nv_mthd(dev, 0xa097, 0x355c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3560, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3564, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3568, 0x00000000);
	nv_mthd(dev, 0xa097, 0x356c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3570, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3574, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3578, 0x00000000);
	nv_mthd(dev, 0xa097, 0x357c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3580, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3584, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3588, 0x00000000);
	nv_mthd(dev, 0xa097, 0x358c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3590, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3594, 0x00000000);
	nv_mthd(dev, 0xa097, 0x3598, 0x00000000);
	nv_mthd(dev, 0xa097, 0x359c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35a0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35b0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35b4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35b8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35c4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35c8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35d0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35d4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35d8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35dc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35e0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35f0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35f4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35f8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x35fc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x030c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1944, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1514, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d68, 0x0000ffff);
	nv_mthd(dev, 0xa097, 0x121c, 0x0fac6881);
	nv_mthd(dev, 0xa097, 0x0fac, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1538, 0x00000001);
	nv_mthd(dev, 0xa097, 0x0fe0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0fe4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0fe8, 0x00000014);
	nv_mthd(dev, 0xa097, 0x0fec, 0x00000040);
	nv_mthd(dev, 0xa097, 0x0ff0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x179c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1228, 0x00000400);
	nv_mthd(dev, 0xa097, 0x122c, 0x00000300);
	nv_mthd(dev, 0xa097, 0x1230, 0x00010001);
	nv_mthd(dev, 0xa097, 0x07f8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x15b4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x15cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1534, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0fb0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x15d0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x153c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x16b4, 0x00000003);
	nv_mthd(dev, 0xa097, 0x0fbc, 0x0000ffff);
	nv_mthd(dev, 0xa097, 0x0fc0, 0x0000ffff);
	nv_mthd(dev, 0xa097, 0x0fc4, 0x0000ffff);
	nv_mthd(dev, 0xa097, 0x0fc8, 0x0000ffff);
	nv_mthd(dev, 0xa097, 0x0df8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0dfc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1948, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1970, 0x00000001);
	nv_mthd(dev, 0xa097, 0x161c, 0x000009f0);
	nv_mthd(dev, 0xa097, 0x0dcc, 0x00000010);
	nv_mthd(dev, 0xa097, 0x163c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x15e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1160, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1164, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1168, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x116c, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1170, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1174, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1178, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x117c, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1180, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1184, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1188, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x118c, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1190, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1194, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1198, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x119c, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11a0, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11a4, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11a8, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11ac, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11b0, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11b4, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11b8, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11bc, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11c0, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11c4, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11c8, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11cc, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11d0, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11d4, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11d8, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x11dc, 0x25e00040);
	nv_mthd(dev, 0xa097, 0x1880, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1884, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1888, 0x00000000);
	nv_mthd(dev, 0xa097, 0x188c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1890, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1894, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1898, 0x00000000);
	nv_mthd(dev, 0xa097, 0x189c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18a0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18b0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18b4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18b8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18c4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18c8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18d0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18d4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18d8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18dc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18e0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18f0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18f4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18f8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x18fc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f84, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f88, 0x00000000);
	nv_mthd(dev, 0xa097, 0x17c8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x17cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x17d0, 0x000000ff);
	nv_mthd(dev, 0xa097, 0x17d4, 0xffffffff);
	nv_mthd(dev, 0xa097, 0x17d8, 0x00000002);
	nv_mthd(dev, 0xa097, 0x17dc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x15f4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x15f8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1434, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1438, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d74, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0dec, 0x00000001);
	nv_mthd(dev, 0xa097, 0x13a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1318, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1644, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0748, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0de8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1648, 0x00000000);
	nv_mthd(dev, 0xa097, 0x12a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1120, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1124, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1128, 0x00000000);
	nv_mthd(dev, 0xa097, 0x112c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1118, 0x00000000);
	nv_mthd(dev, 0xa097, 0x164c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1658, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1910, 0x00000290);
	nv_mthd(dev, 0xa097, 0x1518, 0x00000000);
	nv_mthd(dev, 0xa097, 0x165c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1520, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1604, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1570, 0x00000000);
	nv_mthd(dev, 0xa097, 0x13b0, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x13b4, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x020c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1670, 0x30201000);
	nv_mthd(dev, 0xa097, 0x1674, 0x70605040);
	nv_mthd(dev, 0xa097, 0x1678, 0xb8a89888);
	nv_mthd(dev, 0xa097, 0x167c, 0xf8e8d8c8);
	nv_mthd(dev, 0xa097, 0x166c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1680, 0x00ffff00);
	nv_mthd(dev, 0xa097, 0x12d0, 0x00000003);
	nv_mthd(dev, 0xa097, 0x12d4, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1684, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1688, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0dac, 0x00001b02);
	nv_mthd(dev, 0xa097, 0x0db0, 0x00001b02);
	nv_mthd(dev, 0xa097, 0x0db4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x168c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x15bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x156c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x187c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1110, 0x00000001);
	nv_mthd(dev, 0xa097, 0x0dc0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0dc4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0dc8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1234, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1690, 0x00000000);
	nv_mthd(dev, 0xa097, 0x12ac, 0x00000001);
	nv_mthd(dev, 0xa097, 0x0790, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0794, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0798, 0x00000000);
	nv_mthd(dev, 0xa097, 0x079c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x07a0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x077c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1000, 0x00000010);
	nv_mthd(dev, 0xa097, 0x10fc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1290, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0218, 0x00000010);
	nv_mthd(dev, 0xa097, 0x12d8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x12dc, 0x00000010);
	nv_mthd(dev, 0xa097, 0x0d94, 0x00000001);
	nv_mthd(dev, 0xa097, 0x155c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1560, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1564, 0x00000fff);
	nv_mthd(dev, 0xa097, 0x1574, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1578, 0x00000000);
	nv_mthd(dev, 0xa097, 0x157c, 0x000fffff);
	nv_mthd(dev, 0xa097, 0x1354, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1610, 0x00000012);
	nv_mthd(dev, 0xa097, 0x1608, 0x00000000);
	nv_mthd(dev, 0xa097, 0x160c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x260c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x07ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x162c, 0x00000003);
	nv_mthd(dev, 0xa097, 0x0210, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0320, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0324, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0328, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x032c, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0330, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0334, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0338, 0x3f800000);
	nv_mthd(dev, 0xa097, 0x0750, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0760, 0x39291909);
	nv_mthd(dev, 0xa097, 0x0764, 0x79695949);
	nv_mthd(dev, 0xa097, 0x0768, 0xb9a99989);
	nv_mthd(dev, 0xa097, 0x076c, 0xf9e9d9c9);
	nv_mthd(dev, 0xa097, 0x0770, 0x30201000);
	nv_mthd(dev, 0xa097, 0x0774, 0x70605040);
	nv_mthd(dev, 0xa097, 0x0778, 0x00009080);
	nv_mthd(dev, 0xa097, 0x0780, 0x39291909);
	nv_mthd(dev, 0xa097, 0x0784, 0x79695949);
	nv_mthd(dev, 0xa097, 0x0788, 0xb9a99989);
	nv_mthd(dev, 0xa097, 0x078c, 0xf9e9d9c9);
	nv_mthd(dev, 0xa097, 0x07d0, 0x30201000);
	nv_mthd(dev, 0xa097, 0x07d4, 0x70605040);
	nv_mthd(dev, 0xa097, 0x07d8, 0x00009080);
	nv_mthd(dev, 0xa097, 0x037c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x0740, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0744, 0x00000000);
	nv_mthd(dev, 0xa097, 0x2600, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1918, 0x00000000);
	nv_mthd(dev, 0xa097, 0x191c, 0x00000900);
	nv_mthd(dev, 0xa097, 0x1920, 0x00000405);
	nv_mthd(dev, 0xa097, 0x1308, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1924, 0x00000000);
	nv_mthd(dev, 0xa097, 0x13ac, 0x00000000);
	nv_mthd(dev, 0xa097, 0x192c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x193c, 0x00002c1c);
	nv_mthd(dev, 0xa097, 0x0d7c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f8c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x02c0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1510, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1940, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ff4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0ff8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x194c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1950, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1968, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1590, 0x0000003f);
	nv_mthd(dev, 0xa097, 0x07e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x07ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x07f0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x07f4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x196c, 0x00000011);
	nv_mthd(dev, 0xa097, 0x02e4, 0x0000b001);
	nv_mthd(dev, 0xa097, 0x036c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0370, 0x00000000);
	nv_mthd(dev, 0xa097, 0x197c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0fcc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0fd0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x02d8, 0x00000040);
	nv_mthd(dev, 0xa097, 0x1980, 0x00000080);
	nv_mthd(dev, 0xa097, 0x1504, 0x00000080);
	nv_mthd(dev, 0xa097, 0x1984, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0300, 0x00000001);
	nv_mthd(dev, 0xa097, 0x13a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x12ec, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1310, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1314, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1380, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1384, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1388, 0x00000001);
	nv_mthd(dev, 0xa097, 0x138c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1390, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1394, 0x00000000);
	nv_mthd(dev, 0xa097, 0x139c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1398, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1594, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1598, 0x00000001);
	nv_mthd(dev, 0xa097, 0x159c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x15a0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x15a4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x0f54, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f58, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f5c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x19bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f9c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0fa0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x12cc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x12e8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x130c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1360, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1364, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1368, 0x00000000);
	nv_mthd(dev, 0xa097, 0x136c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1370, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1374, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1378, 0x00000000);
	nv_mthd(dev, 0xa097, 0x137c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x133c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1340, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1344, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1348, 0x00000001);
	nv_mthd(dev, 0xa097, 0x134c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1350, 0x00000002);
	nv_mthd(dev, 0xa097, 0x1358, 0x00000001);
	nv_mthd(dev, 0xa097, 0x12e4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x131c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1320, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1324, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1328, 0x00000000);
	nv_mthd(dev, 0xa097, 0x19c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1140, 0x00000000);
	nv_mthd(dev, 0xa097, 0x19c4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x19c8, 0x00001500);
	nv_mthd(dev, 0xa097, 0x135c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f90, 0x00000000);
	nv_mthd(dev, 0xa097, 0x19e0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19e4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19e8, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19ec, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19f0, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19f4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19f8, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19fc, 0x00000001);
	nv_mthd(dev, 0xa097, 0x19cc, 0x00000001);
	nv_mthd(dev, 0xa097, 0x15b8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1a00, 0x00001111);
	nv_mthd(dev, 0xa097, 0x1a04, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1a08, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1a0c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1a10, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1a14, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1a18, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1a1c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d6c, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x0d70, 0xffff0000);
	nv_mthd(dev, 0xa097, 0x10f8, 0x00001010);
	nv_mthd(dev, 0xa097, 0x0d80, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d84, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d88, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d8c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0d90, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0da0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x07a4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x07a8, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1508, 0x80000000);
	nv_mthd(dev, 0xa097, 0x150c, 0x40000000);
	nv_mthd(dev, 0xa097, 0x1668, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0318, 0x00000008);
	nv_mthd(dev, 0xa097, 0x031c, 0x00000008);
	nv_mthd(dev, 0xa097, 0x0d9c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x0374, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0378, 0x00000020);
	nv_mthd(dev, 0xa097, 0x07dc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x074c, 0x00000055);
	nv_mthd(dev, 0xa097, 0x1420, 0x00000003);
	nv_mthd(dev, 0xa097, 0x17bc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x17c0, 0x00000000);
	nv_mthd(dev, 0xa097, 0x17c4, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1008, 0x00000008);
	nv_mthd(dev, 0xa097, 0x100c, 0x00000040);
	nv_mthd(dev, 0xa097, 0x1010, 0x0000012c);
	nv_mthd(dev, 0xa097, 0x0d60, 0x00000040);
	nv_mthd(dev, 0xa097, 0x075c, 0x00000003);
	nv_mthd(dev, 0xa097, 0x1018, 0x00000020);
	nv_mthd(dev, 0xa097, 0x101c, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1020, 0x00000020);
	nv_mthd(dev, 0xa097, 0x1024, 0x00000001);
	nv_mthd(dev, 0xa097, 0x1444, 0x00000000);
	nv_mthd(dev, 0xa097, 0x1448, 0x00000000);
	nv_mthd(dev, 0xa097, 0x144c, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0360, 0x20164010);
	nv_mthd(dev, 0xa097, 0x0364, 0x00000020);
	nv_mthd(dev, 0xa097, 0x0368, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0de4, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0204, 0x00000006);
	nv_mthd(dev, 0xa097, 0x0208, 0x00000000);
	nv_mthd(dev, 0xa097, 0x02cc, 0x003fffff);
	nv_mthd(dev, 0xa097, 0x02d0, 0x003fffff);
	nv_mthd(dev, 0xa097, 0x1220, 0x00000005);
	nv_mthd(dev, 0xa097, 0x0fdc, 0x00000000);
	nv_mthd(dev, 0xa097, 0x0f98, 0x00400008);
	nv_mthd(dev, 0xa097, 0x1284, 0x08000080);
	nv_mthd(dev, 0xa097, 0x1450, 0x00400008);
	nv_mthd(dev, 0xa097, 0x1454, 0x08000080);
	nv_mthd(dev, 0xa097, 0x0214, 0x00000000);
}

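/* Default method state for class 0x902d (the 2D engine class). */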
static void
nve0_grctx_generate_902d(struct drm_device *dev)
{
	nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
	nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
	nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
	nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
	nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
	nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
	nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
	nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
	nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
	nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
	nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
	nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
	nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
	nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
	nv_mthd(dev, 0x902d, 0x3410, 0x00000000);
}

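/*
 * The helpers below write power-on defaults into PGRAPH MMIO ranges
 * whose owning units have not been identified yet (hence the "unk"
 * names); this one covers 0x404010..0x40420c.
 */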
static void
nve0_graph_generate_unk40xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x404010, 0x0);
	nv_wr32(dev, 0x404014, 0x0);
	nv_wr32(dev, 0x404018, 0x0);
	nv_wr32(dev, 0x40401c, 0x0);
	nv_wr32(dev, 0x404020, 0x0);
	nv_wr32(dev, 0x404024, 0xe000);
	nv_wr32(dev, 0x404028, 0x0);
	nv_wr32(dev, 0x4040a8, 0x0);
	nv_wr32(dev, 0x4040ac, 0x0);
	nv_wr32(dev, 0x4040b0, 0x0);
	nv_wr32(dev, 0x4040b4, 0x0);
	nv_wr32(dev, 0x4040b8, 0x0);
	nv_wr32(dev, 0x4040bc, 0x0);
	nv_wr32(dev, 0x4040c0, 0x0);
	nv_wr32(dev, 0x4040c4, 0x0);
	nv_wr32(dev, 0x4040c8, 0xf800008f);
	nv_wr32(dev, 0x4040d0, 0x0);
	nv_wr32(dev, 0x4040d4, 0x0);
	nv_wr32(dev, 0x4040d8, 0x0);
	nv_wr32(dev, 0x4040dc, 0x0);
	nv_wr32(dev, 0x4040e0, 0x0);
	nv_wr32(dev, 0x4040e4, 0x0);
	nv_wr32(dev, 0x4040e8, 0x1000);
	nv_wr32(dev, 0x4040f8, 0x0);
	nv_wr32(dev, 0x404130, 0x0);
	nv_wr32(dev, 0x404134, 0x0);
	nv_wr32(dev, 0x404138, 0x20000040);
	nv_wr32(dev, 0x404150, 0x2e);
	nv_wr32(dev, 0x404154, 0x400);
	nv_wr32(dev, 0x404158, 0x200);
	nv_wr32(dev, 0x404164, 0x55);
	nv_wr32(dev, 0x4041a0, 0x0);
	nv_wr32(dev, 0x4041a4, 0x0);
	nv_wr32(dev, 0x4041a8, 0x0);
	nv_wr32(dev, 0x4041ac, 0x0);
	nv_wr32(dev, 0x404200, 0x0);
	nv_wr32(dev, 0x404204, 0x0);
	nv_wr32(dev, 0x404208, 0x0);
	nv_wr32(dev, 0x40420c, 0x0);
}

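/* Defaults for the 0x404404..0x404498 range. */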
static void
nve0_graph_generate_unk44xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x404404, 0x0);
	nv_wr32(dev, 0x404408, 0x0);
	nv_wr32(dev, 0x40440c, 0x0);
	nv_wr32(dev, 0x404410, 0x0);
	nv_wr32(dev, 0x404414, 0x0);
	nv_wr32(dev, 0x404418, 0x0);
	nv_wr32(dev, 0x40441c, 0x0);
	nv_wr32(dev, 0x404420, 0x0);
	nv_wr32(dev, 0x404424, 0x0);
	nv_wr32(dev, 0x404428, 0x0);
	nv_wr32(dev, 0x40442c, 0x0);
	nv_wr32(dev, 0x404430, 0x0);
	nv_wr32(dev, 0x404434, 0x0);
	nv_wr32(dev, 0x404438, 0x0);
	nv_wr32(dev, 0x404460, 0x0);
	nv_wr32(dev, 0x404464, 0x0);
	nv_wr32(dev, 0x404468, 0xffffff);
	nv_wr32(dev, 0x40446c, 0x0);
	nv_wr32(dev, 0x404480, 0x1);
	nv_wr32(dev, 0x404498, 0x1);
}

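/* Defaults for the 0x404604..0x4046d0 range. */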
static void
nve0_graph_generate_unk46xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x404604, 0x14);
	nv_wr32(dev, 0x404608, 0x0);
	nv_wr32(dev, 0x40460c, 0x3fff);
	nv_wr32(dev, 0x404610, 0x100);
	nv_wr32(dev, 0x404618, 0x0);
	nv_wr32(dev, 0x40461c, 0x0);
	nv_wr32(dev, 0x404620, 0x0);
	nv_wr32(dev, 0x404624, 0x0);
	nv_wr32(dev, 0x40462c, 0x0);
	nv_wr32(dev, 0x404630, 0x0);
	nv_wr32(dev, 0x404640, 0x0);
	nv_wr32(dev, 0x404654, 0x0);
	nv_wr32(dev, 0x404660, 0x0);
	nv_wr32(dev, 0x404678, 0x0);
	nv_wr32(dev, 0x40467c, 0x2);
	nv_wr32(dev, 0x404680, 0x0);
	nv_wr32(dev, 0x404684, 0x0);
	nv_wr32(dev, 0x404688, 0x0);
	nv_wr32(dev, 0x40468c, 0x0);
	nv_wr32(dev, 0x404690, 0x0);
	nv_wr32(dev, 0x404694, 0x0);
	nv_wr32(dev, 0x404698, 0x0);
	nv_wr32(dev, 0x40469c, 0x0);
	nv_wr32(dev, 0x4046a0, 0x7f0080);
	nv_wr32(dev, 0x4046a4, 0x0);
	nv_wr32(dev, 0x4046a8, 0x0);
	nv_wr32(dev, 0x4046ac, 0x0);
	nv_wr32(dev, 0x4046b0, 0x0);
	nv_wr32(dev, 0x4046b4, 0x0);
	nv_wr32(dev, 0x4046b8, 0x0);
	nv_wr32(dev, 0x4046bc, 0x0);
	nv_wr32(dev, 0x4046c0, 0x0);
	nv_wr32(dev, 0x4046c8, 0x0);
	nv_wr32(dev, 0x4046cc, 0x0);
	nv_wr32(dev, 0x4046d0, 0x0);
}

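/* Defaults for the 0x404700..0x404754 range. */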
static void
nve0_graph_generate_unk47xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x404700, 0x0);
	nv_wr32(dev, 0x404704, 0x0);
	nv_wr32(dev, 0x404708, 0x0);
	nv_wr32(dev, 0x404718, 0x0);
	nv_wr32(dev, 0x40471c, 0x0);
	nv_wr32(dev, 0x404720, 0x0);
	nv_wr32(dev, 0x404724, 0x0);
	nv_wr32(dev, 0x404728, 0x0);
	nv_wr32(dev, 0x40472c, 0x0);
	nv_wr32(dev, 0x404730, 0x0);
	nv_wr32(dev, 0x404734, 0x100);
	nv_wr32(dev, 0x404738, 0x0);
	nv_wr32(dev, 0x40473c, 0x0);
	nv_wr32(dev, 0x404744, 0x0);
	nv_wr32(dev, 0x404748, 0x0);
	nv_wr32(dev, 0x404754, 0x0);
}

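/* Defaults for the 0x405800..0x405b10 range. */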
static void
nve0_graph_generate_unk58xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x405800, 0xf8000bf);
	nv_wr32(dev, 0x405830, 0x2180648);
	nv_wr32(dev, 0x405834, 0x8000000);
	nv_wr32(dev, 0x405838, 0x0);
	nv_wr32(dev, 0x405854, 0x0);
	nv_wr32(dev, 0x405870, 0x1);
	nv_wr32(dev, 0x405874, 0x1);
	nv_wr32(dev, 0x405878, 0x1);
	nv_wr32(dev, 0x40587c, 0x1);
	nv_wr32(dev, 0x405a00, 0x0);
	nv_wr32(dev, 0x405a04, 0x0);
	nv_wr32(dev, 0x405a18, 0x0);
	nv_wr32(dev, 0x405b00, 0x0);
	nv_wr32(dev, 0x405b10, 0x1000);
}

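/* Defaults for the 0x406020..0x406034 range. */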
static void
nve0_graph_generate_unk60xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x406020, 0x4103c1);
	nv_wr32(dev, 0x406028, 0x1);
	nv_wr32(dev, 0x40602c, 0x1);
	nv_wr32(dev, 0x406030, 0x1);
	nv_wr32(dev, 0x406034, 0x1);
}

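/* Defaults for the 0x4064a8..0x4064fc range. */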
static void
nve0_graph_generate_unk64xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x4064a8, 0x0);
	nv_wr32(dev, 0x4064ac, 0x3fff);
	nv_wr32(dev, 0x4064b4, 0x0);
	nv_wr32(dev, 0x4064b8, 0x0);
	nv_wr32(dev, 0x4064c0, 0x801a00f0);
	nv_wr32(dev, 0x4064c4, 0x192ffff);
	nv_wr32(dev, 0x4064c8, 0x1800600);
	nv_wr32(dev, 0x4064cc, 0x0);
	nv_wr32(dev, 0x4064d0, 0x0);
	nv_wr32(dev, 0x4064d4, 0x0);
	nv_wr32(dev, 0x4064d8, 0x0);
	nv_wr32(dev, 0x4064dc, 0x0);
	nv_wr32(dev, 0x4064e0, 0x0);
	nv_wr32(dev, 0x4064e4, 0x0);
	nv_wr32(dev, 0x4064e8, 0x0);
	nv_wr32(dev, 0x4064ec, 0x0);
	nv_wr32(dev, 0x4064fc, 0x22a);
}

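/* Single default for 0x407040. */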
static void
nve0_graph_generate_unk70xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x407040, 0x0);
}

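/* Defaults for the 0x407804..0x4078bc range. */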
static void
nve0_graph_generate_unk78xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x407804, 0x23);
	nv_wr32(dev, 0x40780c, 0xa418820);
	nv_wr32(dev, 0x407810, 0x62080e6);
	nv_wr32(dev, 0x407814, 0x20398a4);
	nv_wr32(dev, 0x407818, 0xe629062);
	nv_wr32(dev, 0x40781c, 0xa418820);
	nv_wr32(dev, 0x407820, 0xe6);
	nv_wr32(dev, 0x4078bc, 0x103);
}

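/* Defaults for the 0x408000..0x408064 range. */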
static void
nve0_graph_generate_unk80xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x408000, 0x0);
	nv_wr32(dev, 0x408004, 0x0);
	nv_wr32(dev, 0x408008, 0x30);
	nv_wr32(dev, 0x40800c, 0x0);
	nv_wr32(dev, 0x408010, 0x0);
	nv_wr32(dev, 0x408014, 0x69);
	nv_wr32(dev, 0x408018, 0xe100e100);
	nv_wr32(dev, 0x408064, 0x0);
}

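/* Defaults for the 0x408800..0x408980 range. */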
static void
nve0_graph_generate_unk88xx(struct drm_device *dev)
{
	nv_wr32(dev, 0x408800, 0x2802a3c);
	nv_wr32(dev, 0x408804, 0x40);
	nv_wr32(dev, 0x408808, 0x1043e005);
	nv_wr32(dev, 0x408840, 0xb);
	nv_wr32(dev, 0x408900, 0x3080b801);
	nv_wr32(dev, 0x408904, 0x62000001);
	nv_wr32(dev, 0x408908, 0xc8102f);
	nv_wr32(dev, 0x408980, 0x11d);
}

2412static void
2413nve0_graph_generate_gpc(struct drm_device *dev)
2414{
2415 nv_wr32(dev, 0x418380, 0x16);
2416 nv_wr32(dev, 0x418400, 0x38004e00);
2417 nv_wr32(dev, 0x418404, 0x71e0ffff);
2418 nv_wr32(dev, 0x41840c, 0x1008);
2419 nv_wr32(dev, 0x418410, 0xfff0fff);
2420 nv_wr32(dev, 0x418414, 0x2200fff);
2421 nv_wr32(dev, 0x418450, 0x0);
2422 nv_wr32(dev, 0x418454, 0x0);
2423 nv_wr32(dev, 0x418458, 0x0);
2424 nv_wr32(dev, 0x41845c, 0x0);
2425 nv_wr32(dev, 0x418460, 0x0);
2426 nv_wr32(dev, 0x418464, 0x0);
2427 nv_wr32(dev, 0x418468, 0x1);
2428 nv_wr32(dev, 0x41846c, 0x0);
2429 nv_wr32(dev, 0x418470, 0x0);
2430 nv_wr32(dev, 0x418600, 0x1f);
2431 nv_wr32(dev, 0x418684, 0xf);
2432 nv_wr32(dev, 0x418700, 0x2);
2433 nv_wr32(dev, 0x418704, 0x80);
2434 nv_wr32(dev, 0x418708, 0x0);
2435 nv_wr32(dev, 0x41870c, 0x0);
2436 nv_wr32(dev, 0x418710, 0x0);
2437 nv_wr32(dev, 0x418800, 0x7006860a);
2438 nv_wr32(dev, 0x418808, 0x0);
2439 nv_wr32(dev, 0x41880c, 0x0);
2440 nv_wr32(dev, 0x418810, 0x0);
2441 nv_wr32(dev, 0x418828, 0x44);
2442 nv_wr32(dev, 0x418830, 0x10000001);
2443 nv_wr32(dev, 0x4188d8, 0x8);
2444 nv_wr32(dev, 0x4188e0, 0x1000000);
2445 nv_wr32(dev, 0x4188e8, 0x0);
2446 nv_wr32(dev, 0x4188ec, 0x0);
2447 nv_wr32(dev, 0x4188f0, 0x0);
2448 nv_wr32(dev, 0x4188f4, 0x0);
2449 nv_wr32(dev, 0x4188f8, 0x0);
2450 nv_wr32(dev, 0x4188fc, 0x20100018);
2451 nv_wr32(dev, 0x41891c, 0xff00ff);
2452 nv_wr32(dev, 0x418924, 0x0);
2453 nv_wr32(dev, 0x418928, 0xffff00);
2454 nv_wr32(dev, 0x41892c, 0xff00);
2455 nv_wr32(dev, 0x418a00, 0x0);
2456 nv_wr32(dev, 0x418a04, 0x0);
2457 nv_wr32(dev, 0x418a08, 0x0);
2458 nv_wr32(dev, 0x418a0c, 0x10000);
2459 nv_wr32(dev, 0x418a10, 0x0);
2460 nv_wr32(dev, 0x418a14, 0x0);
2461 nv_wr32(dev, 0x418a18, 0x0);
2462 nv_wr32(dev, 0x418a20, 0x0);
2463 nv_wr32(dev, 0x418a24, 0x0);
2464 nv_wr32(dev, 0x418a28, 0x0);
2465 nv_wr32(dev, 0x418a2c, 0x10000);
2466 nv_wr32(dev, 0x418a30, 0x0);
2467 nv_wr32(dev, 0x418a34, 0x0);
2468 nv_wr32(dev, 0x418a38, 0x0);
2469 nv_wr32(dev, 0x418a40, 0x0);
2470 nv_wr32(dev, 0x418a44, 0x0);
2471 nv_wr32(dev, 0x418a48, 0x0);
2472 nv_wr32(dev, 0x418a4c, 0x10000);
2473 nv_wr32(dev, 0x418a50, 0x0);
2474 nv_wr32(dev, 0x418a54, 0x0);
2475 nv_wr32(dev, 0x418a58, 0x0);
2476 nv_wr32(dev, 0x418a60, 0x0);
2477 nv_wr32(dev, 0x418a64, 0x0);
2478 nv_wr32(dev, 0x418a68, 0x0);
2479 nv_wr32(dev, 0x418a6c, 0x10000);
2480 nv_wr32(dev, 0x418a70, 0x0);
2481 nv_wr32(dev, 0x418a74, 0x0);
2482 nv_wr32(dev, 0x418a78, 0x0);
2483 nv_wr32(dev, 0x418a80, 0x0);
2484 nv_wr32(dev, 0x418a84, 0x0);
2485 nv_wr32(dev, 0x418a88, 0x0);
2486 nv_wr32(dev, 0x418a8c, 0x10000);
2487 nv_wr32(dev, 0x418a90, 0x0);
2488 nv_wr32(dev, 0x418a94, 0x0);
2489 nv_wr32(dev, 0x418a98, 0x0);
2490 nv_wr32(dev, 0x418aa0, 0x0);
2491 nv_wr32(dev, 0x418aa4, 0x0);
2492 nv_wr32(dev, 0x418aa8, 0x0);
2493 nv_wr32(dev, 0x418aac, 0x10000);
2494 nv_wr32(dev, 0x418ab0, 0x0);
2495 nv_wr32(dev, 0x418ab4, 0x0);
2496 nv_wr32(dev, 0x418ab8, 0x0);
2497 nv_wr32(dev, 0x418ac0, 0x0);
2498 nv_wr32(dev, 0x418ac4, 0x0);
2499 nv_wr32(dev, 0x418ac8, 0x0);
2500 nv_wr32(dev, 0x418acc, 0x10000);
2501 nv_wr32(dev, 0x418ad0, 0x0);
2502 nv_wr32(dev, 0x418ad4, 0x0);
2503 nv_wr32(dev, 0x418ad8, 0x0);
2504 nv_wr32(dev, 0x418ae0, 0x0);
2505 nv_wr32(dev, 0x418ae4, 0x0);
2506 nv_wr32(dev, 0x418ae8, 0x0);
2507 nv_wr32(dev, 0x418aec, 0x10000);
2508 nv_wr32(dev, 0x418af0, 0x0);
2509 nv_wr32(dev, 0x418af4, 0x0);
2510 nv_wr32(dev, 0x418af8, 0x0);
2511 nv_wr32(dev, 0x418b00, 0x6);
2512 nv_wr32(dev, 0x418b08, 0xa418820);
2513 nv_wr32(dev, 0x418b0c, 0x62080e6);
2514 nv_wr32(dev, 0x418b10, 0x20398a4);
2515 nv_wr32(dev, 0x418b14, 0xe629062);
2516 nv_wr32(dev, 0x418b18, 0xa418820);
2517 nv_wr32(dev, 0x418b1c, 0xe6);
2518 nv_wr32(dev, 0x418bb8, 0x103);
2519 nv_wr32(dev, 0x418c08, 0x1);
2520 nv_wr32(dev, 0x418c10, 0x0);
2521 nv_wr32(dev, 0x418c14, 0x0);
2522 nv_wr32(dev, 0x418c18, 0x0);
2523 nv_wr32(dev, 0x418c1c, 0x0);
2524 nv_wr32(dev, 0x418c20, 0x0);
2525 nv_wr32(dev, 0x418c24, 0x0);
2526 nv_wr32(dev, 0x418c28, 0x0);
2527 nv_wr32(dev, 0x418c2c, 0x0);
2528 nv_wr32(dev, 0x418c40, 0xffffffff);
2529 nv_wr32(dev, 0x418c6c, 0x1);
2530 nv_wr32(dev, 0x418c80, 0x20200004);
2531 nv_wr32(dev, 0x418c8c, 0x1);
2532 nv_wr32(dev, 0x419000, 0x780);
2533 nv_wr32(dev, 0x419004, 0x0);
2534 nv_wr32(dev, 0x419008, 0x0);
2535 nv_wr32(dev, 0x419014, 0x4);
2536}
2537
2538static void
2539nve0_graph_generate_tpc(struct drm_device *dev)
2540{
2541 nv_wr32(dev, 0x419848, 0x0);
2542 nv_wr32(dev, 0x419864, 0x129);
2543 nv_wr32(dev, 0x419888, 0x0);
2544 nv_wr32(dev, 0x419a00, 0xf0);
2545 nv_wr32(dev, 0x419a04, 0x1);
2546 nv_wr32(dev, 0x419a08, 0x21);
2547 nv_wr32(dev, 0x419a0c, 0x20000);
2548 nv_wr32(dev, 0x419a10, 0x0);
2549 nv_wr32(dev, 0x419a14, 0x200);
2550 nv_wr32(dev, 0x419a1c, 0xc000);
2551 nv_wr32(dev, 0x419a20, 0x800);
2552 nv_wr32(dev, 0x419a30, 0x1);
2553 nv_wr32(dev, 0x419ac4, 0x37f440);
2554 nv_wr32(dev, 0x419c00, 0xa);
2555 nv_wr32(dev, 0x419c04, 0x80000006);
2556 nv_wr32(dev, 0x419c08, 0x2);
2557 nv_wr32(dev, 0x419c20, 0x0);
2558 nv_wr32(dev, 0x419c24, 0x84210);
2559 nv_wr32(dev, 0x419c28, 0x3efbefbe);
2560 nv_wr32(dev, 0x419ce8, 0x0);
2561 nv_wr32(dev, 0x419cf4, 0x3203);
2562 nv_wr32(dev, 0x419e04, 0x0);
2563 nv_wr32(dev, 0x419e08, 0x0);
2564 nv_wr32(dev, 0x419e0c, 0x0);
2565 nv_wr32(dev, 0x419e10, 0x402);
2566 nv_wr32(dev, 0x419e44, 0x13eff2);
2567 nv_wr32(dev, 0x419e48, 0x0);
2568 nv_wr32(dev, 0x419e4c, 0x7f);
2569 nv_wr32(dev, 0x419e50, 0x0);
2570 nv_wr32(dev, 0x419e54, 0x0);
2571 nv_wr32(dev, 0x419e58, 0x0);
2572 nv_wr32(dev, 0x419e5c, 0x0);
2573 nv_wr32(dev, 0x419e60, 0x0);
2574 nv_wr32(dev, 0x419e64, 0x0);
2575 nv_wr32(dev, 0x419e68, 0x0);
2576 nv_wr32(dev, 0x419e6c, 0x0);
2577 nv_wr32(dev, 0x419e70, 0x0);
2578 nv_wr32(dev, 0x419e74, 0x0);
2579 nv_wr32(dev, 0x419e78, 0x0);
2580 nv_wr32(dev, 0x419e7c, 0x0);
2581 nv_wr32(dev, 0x419e80, 0x0);
2582 nv_wr32(dev, 0x419e84, 0x0);
2583 nv_wr32(dev, 0x419e88, 0x0);
2584 nv_wr32(dev, 0x419e8c, 0x0);
2585 nv_wr32(dev, 0x419e90, 0x0);
2586 nv_wr32(dev, 0x419e94, 0x0);
2587 nv_wr32(dev, 0x419e98, 0x0);
2588 nv_wr32(dev, 0x419eac, 0x1fcf);
2589 nv_wr32(dev, 0x419eb0, 0xd3f);
2590 nv_wr32(dev, 0x419ec8, 0x1304f);
2591 nv_wr32(dev, 0x419f30, 0x0);
2592 nv_wr32(dev, 0x419f34, 0x0);
2593 nv_wr32(dev, 0x419f38, 0x0);
2594 nv_wr32(dev, 0x419f3c, 0x0);
2595 nv_wr32(dev, 0x419f40, 0x0);
2596 nv_wr32(dev, 0x419f44, 0x0);
2597 nv_wr32(dev, 0x419f48, 0x0);
2598 nv_wr32(dev, 0x419f4c, 0x0);
2599 nv_wr32(dev, 0x419f58, 0x0);
2600 nv_wr32(dev, 0x419f78, 0xb);
2601}
2602
2603static void
2604nve0_graph_generate_tpcunk(struct drm_device *dev)
2605{
2606 nv_wr32(dev, 0x41be24, 0x6);
2607 nv_wr32(dev, 0x41bec0, 0x12180000);
2608 nv_wr32(dev, 0x41bec4, 0x37f7f);
2609 nv_wr32(dev, 0x41bee4, 0x6480430);
2610 nv_wr32(dev, 0x41bf00, 0xa418820);
2611 nv_wr32(dev, 0x41bf04, 0x62080e6);
2612 nv_wr32(dev, 0x41bf08, 0x20398a4);
2613 nv_wr32(dev, 0x41bf0c, 0xe629062);
2614 nv_wr32(dev, 0x41bf10, 0xa418820);
2615 nv_wr32(dev, 0x41bf14, 0xe6);
2616 nv_wr32(dev, 0x41bfd0, 0x900103);
2617 nv_wr32(dev, 0x41bfe0, 0x400001);
2618 nv_wr32(dev, 0x41bfe4, 0x0);
2619}
2620
2621int
2622nve0_grctx_generate(struct nouveau_channel *chan)
2623{
2624 struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
2625 struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
2626 struct drm_device *dev = chan->dev;
2627 u32 data[6] = {}, data2[2] = {}, tmp;
2628 u32 tpc_set = 0, tpc_mask = 0;
2629 u8 tpcnr[GPC_MAX], a, b;
2630 u8 shift, ntpcv;
2631 int i, gpc, tpc, id;
2632
2633 nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
2634 nv_wr32(dev, 0x400204, 0x00000000);
2635 nv_wr32(dev, 0x400208, 0x00000000);
2636
2637 nve0_graph_generate_unk40xx(dev);
2638 nve0_graph_generate_unk44xx(dev);
2639 nve0_graph_generate_unk46xx(dev);
2640 nve0_graph_generate_unk47xx(dev);
2641 nve0_graph_generate_unk58xx(dev);
2642 nve0_graph_generate_unk60xx(dev);
2643 nve0_graph_generate_unk64xx(dev);
2644 nve0_graph_generate_unk70xx(dev);
2645 nve0_graph_generate_unk78xx(dev);
2646 nve0_graph_generate_unk80xx(dev);
2647 nve0_graph_generate_unk88xx(dev);
2648 nve0_graph_generate_gpc(dev);
2649 nve0_graph_generate_tpc(dev);
2650 nve0_graph_generate_tpcunk(dev);
2651
2652 nv_wr32(dev, 0x404154, 0x0);
2653
2654 for (i = 0; i < grch->mmio_nr * 8; i += 8) {
2655 u32 reg = nv_ro32(grch->mmio, i + 0);
2656 u32 val = nv_ro32(grch->mmio, i + 4);
2657 nv_wr32(dev, reg, val);
2658 }
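	/*
	 * Layout note: grch->mmio stores (register, value) pairs eight
	 * bytes apart, which is why the loop above steps i by 8 and
	 * replays each pair into PGRAPH verbatim.
	 */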
2659
2660 nv_wr32(dev, 0x418c6c, 0x1);
2661 nv_wr32(dev, 0x41980c, 0x10);
2662 nv_wr32(dev, 0x41be08, 0x4);
2663 nv_wr32(dev, 0x4064c0, 0x801a00f0);
2664 nv_wr32(dev, 0x405800, 0xf8000bf);
2665 nv_wr32(dev, 0x419c00, 0xa);
2666
2667 for (tpc = 0, id = 0; tpc < 4; tpc++) {
2668 for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
2669 if (tpc < priv->tpc_nr[gpc]) {
2670 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0698), id);
2671 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x04e8), id);
2672 nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
2673 nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0088), id++);
2674 }
2675
2676 nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
2677 nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
2678 }
2679 }
2680
2681 tmp = 0;
2682 for (i = 0; i < priv->gpc_nr; i++)
2683 tmp |= priv->tpc_nr[i] << (i * 4);
2684 nv_wr32(dev, 0x406028, tmp);
2685 nv_wr32(dev, 0x405870, tmp);
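	/* Worked example (hypothetical): gpc_nr = 2 with tpc_nr = { 4, 3 }
	 * packs to tmp = (4 << 0) | (3 << 4) = 0x34. */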
2686
2687 nv_wr32(dev, 0x40602c, 0x0);
2688 nv_wr32(dev, 0x405874, 0x0);
2689 nv_wr32(dev, 0x406030, 0x0);
2690 nv_wr32(dev, 0x405878, 0x0);
2691 nv_wr32(dev, 0x406034, 0x0);
2692 nv_wr32(dev, 0x40587c, 0x0);
2693
2694	/* calculate first set of magics: the owning GPC of each TPC */
2695 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2696
2697 gpc = -1;
2698 for (tpc = 0; tpc < priv->tpc_total; tpc++) {
2699 do {
2700 gpc = (gpc + 1) % priv->gpc_nr;
2701 } while (!tpcnr[gpc]);
2702 tpcnr[gpc]--;
2703
2704 data[tpc / 6] |= gpc << ((tpc % 6) * 5);
2705 }
2706
2707 for (; tpc < 32; tpc++)
2708 data[tpc / 6] |= 7 << ((tpc % 6) * 5);
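	/*
	 * Packing sketch: each data[] word carries six 5-bit GPC indices,
	 * one per TPC slot.  With a hypothetical gpc_nr = 2 and
	 * tpc_nr = { 2, 2 }, the round-robin above yields
	 * tpc 0 -> gpc 0, tpc 1 -> gpc 1, tpc 2 -> gpc 0, tpc 3 -> gpc 1,
	 * so data[0] = (0 << 0) | (1 << 5) | (0 << 10) | (1 << 15), and
	 * the unused slots up to tpc 31 are padded with the value 7.
	 */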
2709
2710	/* and the second set: the TPC count/shift encoding */
2711 shift = 0;
2712 ntpcv = priv->tpc_total;
2713 while (!(ntpcv & (1 << 4))) {
2714 ntpcv <<= 1;
2715 shift++;
2716 }
2717
2718 data2[0] = ntpcv << 16;
2719 data2[0] |= shift << 21;
2720 data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
2721 data2[0] |= priv->tpc_total << 8;
2722 data2[0] |= priv->magic_not_rop_nr;
2723 for (i = 1; i < 7; i++)
2724 data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
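	/*
	 * Worked example (hypothetical tpc_total = 8): the loop shifts
	 * until bit 4 is set, giving ntpcv = 16 and shift = 1.  Every
	 * (1 << n) % 16 term is then zero, so
	 * data2[0] = (16 << 16) | (1 << 21) | (8 << 8) | magic_not_rop_nr
	 * and data2[1] = 0.
	 */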
2725
2726	/* and write it all to the various parts of PGRAPH */
2727 nv_wr32(dev, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
2728 for (i = 0; i < 6; i++)
2729 nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
2730
2731 nv_wr32(dev, 0x41bfd0, data2[0]);
2732 nv_wr32(dev, 0x41bfe4, data2[1]);
2733 for (i = 0; i < 6; i++)
2734 nv_wr32(dev, 0x41bf00 + (i * 4), data[i]);
2735
2736 nv_wr32(dev, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
2737 for (i = 0; i < 6; i++)
2738 nv_wr32(dev, 0x40780c + (i * 4), data[i]);
2739
2740
2741 memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
2742 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
2743 tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
2744
2745 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
2746 a = (i * (priv->tpc_total - 1)) / 32;
2747 if (a != b) {
2748 b = a;
2749 do {
2750 gpc = (gpc + 1) % priv->gpc_nr;
2751 } while (!tpcnr[gpc]);
2752 tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
2753
2754 tpc_set |= 1 << ((gpc * 8) + tpc);
2755 }
2756
2757 nv_wr32(dev, 0x406800 + (i * 0x20), tpc_set);
2758 nv_wr32(dev, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
2759 }
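	/*
	 * Note: tpc_set only ever gains bits, so each of the 32 slots at
	 * 0x406800 receives a monotonically growing TPC enable set, while
	 * 0x406c00 receives its complement within tpc_mask.
	 */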
2760
2761 for (i = 0; i < 8; i++)
2762 nv_wr32(dev, 0x4064d0 + (i * 0x04), 0x00000000);
2763
2764 nv_wr32(dev, 0x405b00, 0x201);
2765 nv_wr32(dev, 0x408850, 0x2);
2766 nv_wr32(dev, 0x408958, 0x2);
2767 nv_wr32(dev, 0x419f78, 0xa);
2768
2769 nve0_grctx_generate_icmd(dev);
2770 nve0_grctx_generate_a097(dev);
2771 nve0_grctx_generate_902d(dev);
2772
2773 nv_mask(dev, 0x000260, 0x00000001, 0x00000001);
2774 nv_wr32(dev, 0x418800, 0x7026860a); //XXX
2775 nv_wr32(dev, 0x41be10, 0x00bb8bc7); //XXX
2776 return 0;
2777}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 2817101fb167..96184d02c8d9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -83,25 +83,19 @@ static void atombios_scaler_setup(struct drm_crtc *crtc)
83 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 83 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
84 ENABLE_SCALER_PS_ALLOCATION args; 84 ENABLE_SCALER_PS_ALLOCATION args;
85 int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); 85 int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
86 86 struct radeon_encoder *radeon_encoder =
87 to_radeon_encoder(radeon_crtc->encoder);
87 /* fixme - fill in enc_priv for atom dac */ 88 /* fixme - fill in enc_priv for atom dac */
88 enum radeon_tv_std tv_std = TV_STD_NTSC; 89 enum radeon_tv_std tv_std = TV_STD_NTSC;
89 bool is_tv = false, is_cv = false; 90 bool is_tv = false, is_cv = false;
90 struct drm_encoder *encoder;
91 91
92 if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) 92 if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
93 return; 93 return;
94 94
95 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 95 if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
96 /* find tv std */ 96 struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
97 if (encoder->crtc == crtc) { 97 tv_std = tv_dac->tv_std;
98 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 98 is_tv = true;
99 if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
100 struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
101 tv_std = tv_dac->tv_std;
102 is_tv = true;
103 }
104 }
105 } 99 }
106 100
107 memset(&args, 0, sizeof(args)); 101 memset(&args, 0, sizeof(args));
@@ -533,99 +527,87 @@ union adjust_pixel_clock {
533}; 527};
534 528
535static u32 atombios_adjust_pll(struct drm_crtc *crtc, 529static u32 atombios_adjust_pll(struct drm_crtc *crtc,
536 struct drm_display_mode *mode, 530 struct drm_display_mode *mode)
537 struct radeon_pll *pll,
538 bool ss_enabled,
539 struct radeon_atom_ss *ss)
540{ 531{
532 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
541 struct drm_device *dev = crtc->dev; 533 struct drm_device *dev = crtc->dev;
542 struct radeon_device *rdev = dev->dev_private; 534 struct radeon_device *rdev = dev->dev_private;
543 struct drm_encoder *encoder = NULL; 535 struct drm_encoder *encoder = radeon_crtc->encoder;
544 struct radeon_encoder *radeon_encoder = NULL; 536 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
545 struct drm_connector *connector = NULL; 537 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
546 u32 adjusted_clock = mode->clock; 538 u32 adjusted_clock = mode->clock;
547 int encoder_mode = 0; 539 int encoder_mode = atombios_get_encoder_mode(encoder);
548 u32 dp_clock = mode->clock; 540 u32 dp_clock = mode->clock;
549 int bpc = 8; 541 int bpc = radeon_get_monitor_bpc(connector);
550 bool is_duallink = false; 542 bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
551 543
552 /* reset the pll flags */ 544 /* reset the pll flags */
553 pll->flags = 0; 545 radeon_crtc->pll_flags = 0;
554 546
555 if (ASIC_IS_AVIVO(rdev)) { 547 if (ASIC_IS_AVIVO(rdev)) {
556 if ((rdev->family == CHIP_RS600) || 548 if ((rdev->family == CHIP_RS600) ||
557 (rdev->family == CHIP_RS690) || 549 (rdev->family == CHIP_RS690) ||
558 (rdev->family == CHIP_RS740)) 550 (rdev->family == CHIP_RS740))
559 pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/ 551 radeon_crtc->pll_flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
560 RADEON_PLL_PREFER_CLOSEST_LOWER); 552 RADEON_PLL_PREFER_CLOSEST_LOWER);
561 553
562 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ 554 if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
563 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 555 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
564 else 556 else
565 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 557 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
566 558
567 if (rdev->family < CHIP_RV770) 559 if (rdev->family < CHIP_RV770)
568 pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 560 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
569 /* use frac fb div on APUs */ 561 /* use frac fb div on APUs */
570 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) 562 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
571 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; 563 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
572 } else { 564 } else {
573 pll->flags |= RADEON_PLL_LEGACY; 565 radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
574 566
575 if (mode->clock > 200000) /* range limits??? */ 567 if (mode->clock > 200000) /* range limits??? */
576 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 568 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
577 else 569 else
578 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 570 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
579 } 571 }
580 572
581 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 573 if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
582 if (encoder->crtc == crtc) { 574 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
583 radeon_encoder = to_radeon_encoder(encoder); 575 if (connector) {
584 connector = radeon_get_connector_for_encoder(encoder); 576 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
585 bpc = radeon_get_monitor_bpc(connector); 577 struct radeon_connector_atom_dig *dig_connector =
586 encoder_mode = atombios_get_encoder_mode(encoder); 578 radeon_connector->con_priv;
587 is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
588 if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
589 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
590 if (connector) {
591 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
592 struct radeon_connector_atom_dig *dig_connector =
593 radeon_connector->con_priv;
594
595 dp_clock = dig_connector->dp_clock;
596 }
597 }
598 579
599 /* use recommended ref_div for ss */ 580 dp_clock = dig_connector->dp_clock;
600 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 581 }
601 if (ss_enabled) { 582 }
602 if (ss->refdiv) {
603 pll->flags |= RADEON_PLL_USE_REF_DIV;
604 pll->reference_div = ss->refdiv;
605 if (ASIC_IS_AVIVO(rdev))
606 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
607 }
608 }
609 }
610 583
611 if (ASIC_IS_AVIVO(rdev)) { 584 /* use recommended ref_div for ss */
612 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 585 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
613 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) 586 if (radeon_crtc->ss_enabled) {
614 adjusted_clock = mode->clock * 2; 587 if (radeon_crtc->ss.refdiv) {
615 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) 588 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
616 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; 589 radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
617 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 590 if (ASIC_IS_AVIVO(rdev))
618 pll->flags |= RADEON_PLL_IS_LCD; 591 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
619 } else {
620 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
621 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
622 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
623 pll->flags |= RADEON_PLL_USE_REF_DIV;
624 } 592 }
625 break;
626 } 593 }
627 } 594 }
628 595
596 if (ASIC_IS_AVIVO(rdev)) {
597 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
598 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
599 adjusted_clock = mode->clock * 2;
600 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
601 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
602 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
603 radeon_crtc->pll_flags |= RADEON_PLL_IS_LCD;
604 } else {
605 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
606 radeon_crtc->pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
607 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
608 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
609 }
610
629 /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock 611 /* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
630 * accordingly based on the encoder/transmitter to work around 612 * accordingly based on the encoder/transmitter to work around
631 * special hw requirements. 613 * special hw requirements.
@@ -650,7 +632,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
650 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); 632 args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
651 args.v1.ucTransmitterID = radeon_encoder->encoder_id; 633 args.v1.ucTransmitterID = radeon_encoder->encoder_id;
652 args.v1.ucEncodeMode = encoder_mode; 634 args.v1.ucEncodeMode = encoder_mode;
653 if (ss_enabled && ss->percentage) 635 if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
654 args.v1.ucConfig |= 636 args.v1.ucConfig |=
655 ADJUST_DISPLAY_CONFIG_SS_ENABLE; 637 ADJUST_DISPLAY_CONFIG_SS_ENABLE;
656 638
@@ -663,7 +645,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
663 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; 645 args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
664 args.v3.sInput.ucEncodeMode = encoder_mode; 646 args.v3.sInput.ucEncodeMode = encoder_mode;
665 args.v3.sInput.ucDispPllConfig = 0; 647 args.v3.sInput.ucDispPllConfig = 0;
666 if (ss_enabled && ss->percentage) 648 if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
667 args.v3.sInput.ucDispPllConfig |= 649 args.v3.sInput.ucDispPllConfig |=
668 DISPPLL_CONFIG_SS_ENABLE; 650 DISPPLL_CONFIG_SS_ENABLE;
669 if (ENCODER_MODE_IS_DP(encoder_mode)) { 651 if (ENCODER_MODE_IS_DP(encoder_mode)) {
@@ -695,14 +677,14 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
695 index, (uint32_t *)&args); 677 index, (uint32_t *)&args);
696 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; 678 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
697 if (args.v3.sOutput.ucRefDiv) { 679 if (args.v3.sOutput.ucRefDiv) {
698 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; 680 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
699 pll->flags |= RADEON_PLL_USE_REF_DIV; 681 radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
700 pll->reference_div = args.v3.sOutput.ucRefDiv; 682 radeon_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
701 } 683 }
702 if (args.v3.sOutput.ucPostDiv) { 684 if (args.v3.sOutput.ucPostDiv) {
703 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; 685 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
704 pll->flags |= RADEON_PLL_USE_POST_DIV; 686 radeon_crtc->pll_flags |= RADEON_PLL_USE_POST_DIV;
705 pll->post_div = args.v3.sOutput.ucPostDiv; 687 radeon_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
706 } 688 }
707 break; 689 break;
708 default: 690 default:
@@ -837,7 +819,10 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
837 args.v3.ucFracFbDiv = frac_fb_div; 819 args.v3.ucFracFbDiv = frac_fb_div;
838 args.v3.ucPostDiv = post_div; 820 args.v3.ucPostDiv = post_div;
839 args.v3.ucPpll = pll_id; 821 args.v3.ucPpll = pll_id;
840 args.v3.ucMiscInfo = (pll_id << 2); 822 if (crtc_id == ATOM_CRTC2)
823 args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
824 else
825 args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
841 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) 826 if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
842 args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC; 827 args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
843 args.v3.ucTransmitterId = encoder_id; 828 args.v3.ucTransmitterId = encoder_id;
@@ -907,58 +892,29 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
907 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 892 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
908} 893}
909 894
910static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) 895static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
911{ 896{
912 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 897 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
913 struct drm_device *dev = crtc->dev; 898 struct drm_device *dev = crtc->dev;
914 struct radeon_device *rdev = dev->dev_private; 899 struct radeon_device *rdev = dev->dev_private;
915 struct drm_encoder *encoder = NULL; 900 struct radeon_encoder *radeon_encoder =
916 struct radeon_encoder *radeon_encoder = NULL; 901 to_radeon_encoder(radeon_crtc->encoder);
917 u32 pll_clock = mode->clock; 902 int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
918 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
919 struct radeon_pll *pll;
920 u32 adjusted_clock;
921 int encoder_mode = 0;
922 struct radeon_atom_ss ss;
923 bool ss_enabled = false;
924 int bpc = 8;
925 903
926 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 904 radeon_crtc->bpc = 8;
927 if (encoder->crtc == crtc) { 905 radeon_crtc->ss_enabled = false;
928 radeon_encoder = to_radeon_encoder(encoder);
929 encoder_mode = atombios_get_encoder_mode(encoder);
930 break;
931 }
932 }
933
934 if (!radeon_encoder)
935 return;
936
937 switch (radeon_crtc->pll_id) {
938 case ATOM_PPLL1:
939 pll = &rdev->clock.p1pll;
940 break;
941 case ATOM_PPLL2:
942 pll = &rdev->clock.p2pll;
943 break;
944 case ATOM_DCPLL:
945 case ATOM_PPLL_INVALID:
946 default:
947 pll = &rdev->clock.dcpll;
948 break;
949 }
950 906
951 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || 907 if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
952 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { 908 (radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
953 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 909 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
954 struct drm_connector *connector = 910 struct drm_connector *connector =
955 radeon_get_connector_for_encoder(encoder); 911 radeon_get_connector_for_encoder(radeon_crtc->encoder);
956 struct radeon_connector *radeon_connector = 912 struct radeon_connector *radeon_connector =
957 to_radeon_connector(connector); 913 to_radeon_connector(connector);
958 struct radeon_connector_atom_dig *dig_connector = 914 struct radeon_connector_atom_dig *dig_connector =
959 radeon_connector->con_priv; 915 radeon_connector->con_priv;
960 int dp_clock; 916 int dp_clock;
961 bpc = radeon_get_monitor_bpc(connector); 917 radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
962 918
963 switch (encoder_mode) { 919 switch (encoder_mode) {
964 case ATOM_ENCODER_MODE_DP_MST: 920 case ATOM_ENCODER_MODE_DP_MST:
@@ -966,45 +922,54 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
966 /* DP/eDP */ 922 /* DP/eDP */
967 dp_clock = dig_connector->dp_clock / 10; 923 dp_clock = dig_connector->dp_clock / 10;
968 if (ASIC_IS_DCE4(rdev)) 924 if (ASIC_IS_DCE4(rdev))
969 ss_enabled = 925 radeon_crtc->ss_enabled =
970 radeon_atombios_get_asic_ss_info(rdev, &ss, 926 radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
971 ASIC_INTERNAL_SS_ON_DP, 927 ASIC_INTERNAL_SS_ON_DP,
972 dp_clock); 928 dp_clock);
973 else { 929 else {
974 if (dp_clock == 16200) { 930 if (dp_clock == 16200) {
975 ss_enabled = 931 radeon_crtc->ss_enabled =
976 radeon_atombios_get_ppll_ss_info(rdev, &ss, 932 radeon_atombios_get_ppll_ss_info(rdev,
933 &radeon_crtc->ss,
977 ATOM_DP_SS_ID2); 934 ATOM_DP_SS_ID2);
978 if (!ss_enabled) 935 if (!radeon_crtc->ss_enabled)
979 ss_enabled = 936 radeon_crtc->ss_enabled =
980 radeon_atombios_get_ppll_ss_info(rdev, &ss, 937 radeon_atombios_get_ppll_ss_info(rdev,
938 &radeon_crtc->ss,
981 ATOM_DP_SS_ID1); 939 ATOM_DP_SS_ID1);
982 } else 940 } else
983 ss_enabled = 941 radeon_crtc->ss_enabled =
984 radeon_atombios_get_ppll_ss_info(rdev, &ss, 942 radeon_atombios_get_ppll_ss_info(rdev,
943 &radeon_crtc->ss,
985 ATOM_DP_SS_ID1); 944 ATOM_DP_SS_ID1);
986 } 945 }
987 break; 946 break;
988 case ATOM_ENCODER_MODE_LVDS: 947 case ATOM_ENCODER_MODE_LVDS:
989 if (ASIC_IS_DCE4(rdev)) 948 if (ASIC_IS_DCE4(rdev))
990 ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss, 949 radeon_crtc->ss_enabled =
991 dig->lcd_ss_id, 950 radeon_atombios_get_asic_ss_info(rdev,
992 mode->clock / 10); 951 &radeon_crtc->ss,
952 dig->lcd_ss_id,
953 mode->clock / 10);
993 else 954 else
994 ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss, 955 radeon_crtc->ss_enabled =
995 dig->lcd_ss_id); 956 radeon_atombios_get_ppll_ss_info(rdev,
957 &radeon_crtc->ss,
958 dig->lcd_ss_id);
996 break; 959 break;
997 case ATOM_ENCODER_MODE_DVI: 960 case ATOM_ENCODER_MODE_DVI:
998 if (ASIC_IS_DCE4(rdev)) 961 if (ASIC_IS_DCE4(rdev))
999 ss_enabled = 962 radeon_crtc->ss_enabled =
1000 radeon_atombios_get_asic_ss_info(rdev, &ss, 963 radeon_atombios_get_asic_ss_info(rdev,
964 &radeon_crtc->ss,
1001 ASIC_INTERNAL_SS_ON_TMDS, 965 ASIC_INTERNAL_SS_ON_TMDS,
1002 mode->clock / 10); 966 mode->clock / 10);
1003 break; 967 break;
1004 case ATOM_ENCODER_MODE_HDMI: 968 case ATOM_ENCODER_MODE_HDMI:
1005 if (ASIC_IS_DCE4(rdev)) 969 if (ASIC_IS_DCE4(rdev))
1006 ss_enabled = 970 radeon_crtc->ss_enabled =
1007 radeon_atombios_get_asic_ss_info(rdev, &ss, 971 radeon_atombios_get_asic_ss_info(rdev,
972 &radeon_crtc->ss,
1008 ASIC_INTERNAL_SS_ON_HDMI, 973 ASIC_INTERNAL_SS_ON_HDMI,
1009 mode->clock / 10); 974 mode->clock / 10);
1010 break; 975 break;
@@ -1014,43 +979,80 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
1014 } 979 }
1015 980
1016 /* adjust pixel clock as needed */ 981 /* adjust pixel clock as needed */
1017 adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); 982 radeon_crtc->adjusted_clock = atombios_adjust_pll(crtc, mode);
983
984 return true;
985}
986
987static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
988{
989 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
990 struct drm_device *dev = crtc->dev;
991 struct radeon_device *rdev = dev->dev_private;
992 struct radeon_encoder *radeon_encoder =
993 to_radeon_encoder(radeon_crtc->encoder);
994 u32 pll_clock = mode->clock;
995 u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
996 struct radeon_pll *pll;
997 int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
998
999 switch (radeon_crtc->pll_id) {
1000 case ATOM_PPLL1:
1001 pll = &rdev->clock.p1pll;
1002 break;
1003 case ATOM_PPLL2:
1004 pll = &rdev->clock.p2pll;
1005 break;
1006 case ATOM_DCPLL:
1007 case ATOM_PPLL_INVALID:
1008 default:
1009 pll = &rdev->clock.dcpll;
1010 break;
1011 }
1012
1013 /* update pll params */
1014 pll->flags = radeon_crtc->pll_flags;
1015 pll->reference_div = radeon_crtc->pll_reference_div;
1016 pll->post_div = radeon_crtc->pll_post_div;
1018 1017
1019 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) 1018 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
1020 /* TV seems to prefer the legacy algo on some boards */ 1019 /* TV seems to prefer the legacy algo on some boards */
1021 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 1020 radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
1022 &ref_div, &post_div); 1021 &fb_div, &frac_fb_div, &ref_div, &post_div);
1023 else if (ASIC_IS_AVIVO(rdev)) 1022 else if (ASIC_IS_AVIVO(rdev))
1024 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 1023 radeon_compute_pll_avivo(pll, radeon_crtc->adjusted_clock, &pll_clock,
1025 &ref_div, &post_div); 1024 &fb_div, &frac_fb_div, &ref_div, &post_div);
1026 else 1025 else
1027 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 1026 radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
1028 &ref_div, &post_div); 1027 &fb_div, &frac_fb_div, &ref_div, &post_div);
1029 1028
1030 atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss); 1029 atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id,
1030 radeon_crtc->crtc_id, &radeon_crtc->ss);
1031 1031
1032 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, 1032 atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
1033 encoder_mode, radeon_encoder->encoder_id, mode->clock, 1033 encoder_mode, radeon_encoder->encoder_id, mode->clock,
1034 ref_div, fb_div, frac_fb_div, post_div, bpc, ss_enabled, &ss); 1034 ref_div, fb_div, frac_fb_div, post_div,
1035 radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
1035 1036
1036 if (ss_enabled) { 1037 if (radeon_crtc->ss_enabled) {
1037 /* calculate ss amount and step size */ 1038 /* calculate ss amount and step size */
1038 if (ASIC_IS_DCE4(rdev)) { 1039 if (ASIC_IS_DCE4(rdev)) {
1039 u32 step_size; 1040 u32 step_size;
1040 u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000; 1041 u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
1041 ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK; 1042 radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
1042 ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) & 1043 radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
1043 ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK; 1044 ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
1044 if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD) 1045 if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
1045 step_size = (4 * amount * ref_div * (ss.rate * 2048)) / 1046 step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
1046 (125 * 25 * pll->reference_freq / 100); 1047 (125 * 25 * pll->reference_freq / 100);
1047 else 1048 else
1048 step_size = (2 * amount * ref_div * (ss.rate * 2048)) / 1049 step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
1049 (125 * 25 * pll->reference_freq / 100); 1050 (125 * 25 * pll->reference_freq / 100);
1050 ss.step = step_size; 1051 radeon_crtc->ss.step = step_size;
1051 } 1052 }
1052 1053
1053 atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss); 1054 atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id,
1055 radeon_crtc->crtc_id, &radeon_crtc->ss);
1054 } 1056 }
1055} 1057}
1056 1058
@@ -1479,85 +1481,251 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
1479 } 1481 }
1480} 1482}
1481 1483
1484/**
1485 * radeon_get_pll_use_mask - look up a mask of which pplls are in use
1486 *
1487 * @crtc: drm crtc
1488 *
1489 * Returns the mask of which PPLLs (Pixel PLLs) are in use.
1490 */
1491static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
1492{
1493 struct drm_device *dev = crtc->dev;
1494 struct drm_crtc *test_crtc;
1495 struct radeon_crtc *test_radeon_crtc;
1496 u32 pll_in_use = 0;
1497
1498 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
1499 if (crtc == test_crtc)
1500 continue;
1501
1502 test_radeon_crtc = to_radeon_crtc(test_crtc);
1503 if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
1504 pll_in_use |= (1 << test_radeon_crtc->pll_id);
1505 }
1506 return pll_in_use;
1507}
1508
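/*
 * Usage sketch (illustrative only; this mirrors how
 * radeon_atom_pick_pll() consumes the mask below):
 *
 *	u32 pll_in_use = radeon_get_pll_use_mask(crtc);
 *
 *	if (!(pll_in_use & (1 << ATOM_PPLL0)))
 *		return ATOM_PPLL0;
 */
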
1509/**
1510 * radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
1511 *
1512 * @crtc: drm crtc
1513 *
1514 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
1515 * also in DP mode. For DP, a single PPLL can be used for all DP
1516 * crtcs/encoders.
1517 */
1518static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
1519{
1520 struct drm_device *dev = crtc->dev;
1521 struct drm_crtc *test_crtc;
1522 struct radeon_crtc *test_radeon_crtc;
1523
1524 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
1525 if (crtc == test_crtc)
1526 continue;
1527 test_radeon_crtc = to_radeon_crtc(test_crtc);
1528 if (test_radeon_crtc->encoder &&
1529 ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
1530 /* for DP use the same PLL for all */
1531 if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
1532 return test_radeon_crtc->pll_id;
1533 }
1534 }
1535 return ATOM_PPLL_INVALID;
1536}
1537
1538/**
1539 * radeon_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
1540 *
1541 * @crtc: drm crtc
1543 *
1544 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
1545 * be shared (i.e., same clock).
1546 */
1547static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
1548{
1549 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1550 struct drm_device *dev = crtc->dev;
1551 struct drm_crtc *test_crtc;
1552 struct radeon_crtc *test_radeon_crtc;
1553 u32 adjusted_clock, test_adjusted_clock;
1554
1555 adjusted_clock = radeon_crtc->adjusted_clock;
1556
1557 if (adjusted_clock == 0)
1558 return ATOM_PPLL_INVALID;
1559
1560 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
1561 if (crtc == test_crtc)
1562 continue;
1563 test_radeon_crtc = to_radeon_crtc(test_crtc);
1564 if (test_radeon_crtc->encoder &&
1565 !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
1566 /* check if we are already driving this connector with another crtc */
1567 if (test_radeon_crtc->connector == radeon_crtc->connector) {
1568 /* if we are, return that pll */
1569 if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
1570 return test_radeon_crtc->pll_id;
1571 }
1572 /* for non-DP check the clock */
1573 test_adjusted_clock = test_radeon_crtc->adjusted_clock;
1574 if ((crtc->mode.clock == test_crtc->mode.clock) &&
1575 (adjusted_clock == test_adjusted_clock) &&
1576 (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
1577 (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
1578 return test_radeon_crtc->pll_id;
1579 }
1580 }
1581 return ATOM_PPLL_INVALID;
1582}
1583
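/*
 * Sharing criteria, as implemented above: a non-DP crtc may reuse
 * another crtc's PPLL when both drive the same connector, or when the
 * mode clock, adjusted clock and spread-spectrum state all match.
 */
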
1584/**
1585 * radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
1586 *
1587 * @crtc: drm crtc
1588 *
1589 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
1590 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
1591 * monitors a dedicated PPLL must be used. If a particular board has
1592 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
1593 * as there is no need to program the PLL itself. If we are not able to
1594 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
1595 * avoid messing up an existing monitor.
1596 *
1597 * Asic specific PLL information
1598 *
1599 * DCE 6.1
1600 * - PPLL2 is only available to UNIPHYA (both DP and non-DP)
1601 * - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
1602 *
1603 * DCE 6.0
1604 * - PPLL0 is available to all UNIPHY (DP only)
1605 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
1606 *
1607 * DCE 5.0
1608 * - DCPLL is available to all UNIPHY (DP only)
1609 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
1610 *
1611 * DCE 3.0/4.0/4.1
1612 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
1613 *
1614 */
1482static int radeon_atom_pick_pll(struct drm_crtc *crtc) 1615static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1483{ 1616{
1484 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1617 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1485 struct drm_device *dev = crtc->dev; 1618 struct drm_device *dev = crtc->dev;
1486 struct radeon_device *rdev = dev->dev_private; 1619 struct radeon_device *rdev = dev->dev_private;
1487 struct drm_encoder *test_encoder; 1620 struct radeon_encoder *radeon_encoder =
1488 struct drm_crtc *test_crtc; 1621 to_radeon_encoder(radeon_crtc->encoder);
1489 uint32_t pll_in_use = 0; 1622 u32 pll_in_use;
1623 int pll;
1490 1624
1491 if (ASIC_IS_DCE61(rdev)) { 1625 if (ASIC_IS_DCE61(rdev)) {
1492 list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { 1626 struct radeon_encoder_atom_dig *dig =
1493 if (test_encoder->crtc && (test_encoder->crtc == crtc)) { 1627 radeon_encoder->enc_priv;
1494 struct radeon_encoder *test_radeon_encoder = 1628
1495 to_radeon_encoder(test_encoder); 1629 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
1496 struct radeon_encoder_atom_dig *dig = 1630 (dig->linkb == false))
1497 test_radeon_encoder->enc_priv; 1631 /* UNIPHY A uses PPLL2 */
1498 1632 return ATOM_PPLL2;
1499 if ((test_radeon_encoder->encoder_id == 1633 else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
1500 ENCODER_OBJECT_ID_INTERNAL_UNIPHY) && 1634 /* UNIPHY B/C/D/E/F */
1501 (dig->linkb == false)) /* UNIPHY A uses PPLL2 */ 1635 if (rdev->clock.dp_extclk)
1502 return ATOM_PPLL2; 1636 /* skip PPLL programming if using ext clock */
1637 return ATOM_PPLL_INVALID;
1638 else {
1639 /* use the same PPLL for all DP monitors */
1640 pll = radeon_get_shared_dp_ppll(crtc);
1641 if (pll != ATOM_PPLL_INVALID)
1642 return pll;
1503 } 1643 }
1644 } else {
1645 /* use the same PPLL for all monitors with the same clock */
1646 pll = radeon_get_shared_nondp_ppll(crtc);
1647 if (pll != ATOM_PPLL_INVALID)
1648 return pll;
1504 } 1649 }
1505 /* UNIPHY B/C/D/E/F */ 1650 /* UNIPHY B/C/D/E/F */
1506 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { 1651 pll_in_use = radeon_get_pll_use_mask(crtc);
1507 struct radeon_crtc *radeon_test_crtc; 1652 if (!(pll_in_use & (1 << ATOM_PPLL0)))
1508
1509 if (crtc == test_crtc)
1510 continue;
1511
1512 radeon_test_crtc = to_radeon_crtc(test_crtc);
1513 if ((radeon_test_crtc->pll_id == ATOM_PPLL0) ||
1514 (radeon_test_crtc->pll_id == ATOM_PPLL1))
1515 pll_in_use |= (1 << radeon_test_crtc->pll_id);
1516 }
1517 if (!(pll_in_use & 4))
1518 return ATOM_PPLL0; 1653 return ATOM_PPLL0;
1519 return ATOM_PPLL1; 1654 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1655 return ATOM_PPLL1;
1656 DRM_ERROR("unable to allocate a PPLL\n");
1657 return ATOM_PPLL_INVALID;
1520 } else if (ASIC_IS_DCE4(rdev)) { 1658 } else if (ASIC_IS_DCE4(rdev)) {
1521 list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { 1659 /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
1522 if (test_encoder->crtc && (test_encoder->crtc == crtc)) { 1660 * depending on the asic:
1523 /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock, 1661 * DCE4: PPLL or ext clock
1524 * depending on the asic: 1662 * DCE5: PPLL, DCPLL, or ext clock
1525 * DCE4: PPLL or ext clock 1663 * DCE6: PPLL, PPLL0, or ext clock
1526 * DCE5: DCPLL or ext clock 1664 *
1527 * 1665 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
1528 * Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip 1666 * PPLL/DCPLL programming and only program the DP DTO for the
1529 * PPLL/DCPLL programming and only program the DP DTO for the 1667 * crtc virtual pixel clock.
1530 * crtc virtual pixel clock. 1668 */
1531 */ 1669 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
1532 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) { 1670 if (rdev->clock.dp_extclk)
1533 if (rdev->clock.dp_extclk) 1671 /* skip PPLL programming if using ext clock */
1534 return ATOM_PPLL_INVALID; 1672 return ATOM_PPLL_INVALID;
1535 else if (ASIC_IS_DCE6(rdev)) 1673 else if (ASIC_IS_DCE6(rdev))
1536 return ATOM_PPLL0; 1674 /* use PPLL0 for all DP */
1537 else if (ASIC_IS_DCE5(rdev)) 1675 return ATOM_PPLL0;
1538 return ATOM_DCPLL; 1676 else if (ASIC_IS_DCE5(rdev))
1539 } 1677 /* use DCPLL for all DP */
1678 return ATOM_DCPLL;
1679 else {
1680 /* use the same PPLL for all DP monitors */
1681 pll = radeon_get_shared_dp_ppll(crtc);
1682 if (pll != ATOM_PPLL_INVALID)
1683 return pll;
1540 } 1684 }
1685 } else {
1686 /* use the same PPLL for all monitors with the same clock */
1687 pll = radeon_get_shared_nondp_ppll(crtc);
1688 if (pll != ATOM_PPLL_INVALID)
1689 return pll;
1541 } 1690 }
1542 1691 /* all other cases */
1543 /* otherwise, pick one of the plls */ 1692 pll_in_use = radeon_get_pll_use_mask(crtc);
1544 list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { 1693 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1545 struct radeon_crtc *radeon_test_crtc; 1694 return ATOM_PPLL2;
1546 1695 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1547 if (crtc == test_crtc)
1548 continue;
1549
1550 radeon_test_crtc = to_radeon_crtc(test_crtc);
1551 if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
1552 (radeon_test_crtc->pll_id <= ATOM_PPLL2))
1553 pll_in_use |= (1 << radeon_test_crtc->pll_id);
1554 }
1555 if (!(pll_in_use & 1))
1556 return ATOM_PPLL1; 1696 return ATOM_PPLL1;
1557 return ATOM_PPLL2; 1697 DRM_ERROR("unable to allocate a PPLL\n");
1558 } else 1698 return ATOM_PPLL_INVALID;
1559 return radeon_crtc->crtc_id; 1699 } else {
1560 1700 if (ASIC_IS_AVIVO(rdev)) {
1701 /* in DP mode, the DP ref clock can come from either PPLL
1702 * depending on the asic:
1703 * DCE3: PPLL1 or PPLL2
1704 */
1705 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
1706 /* use the same PPLL for all DP monitors */
1707 pll = radeon_get_shared_dp_ppll(crtc);
1708 if (pll != ATOM_PPLL_INVALID)
1709 return pll;
1710 } else {
1711 /* use the same PPLL for all monitors with the same clock */
1712 pll = radeon_get_shared_nondp_ppll(crtc);
1713 if (pll != ATOM_PPLL_INVALID)
1714 return pll;
1715 }
1716 /* all other cases */
1717 pll_in_use = radeon_get_pll_use_mask(crtc);
1718 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1719 return ATOM_PPLL2;
1720 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1721 return ATOM_PPLL1;
1722 DRM_ERROR("unable to allocate a PPLL\n");
1723 return ATOM_PPLL_INVALID;
1724 } else {
1725 /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
1726 return radeon_crtc->crtc_id;
1727 }
1728 }
1561} 1729}
1562 1730
1563void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev) 1731void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
@@ -1588,18 +1756,13 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
1588 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 1756 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1589 struct drm_device *dev = crtc->dev; 1757 struct drm_device *dev = crtc->dev;
1590 struct radeon_device *rdev = dev->dev_private; 1758 struct radeon_device *rdev = dev->dev_private;
1591 struct drm_encoder *encoder; 1759 struct radeon_encoder *radeon_encoder =
1760 to_radeon_encoder(radeon_crtc->encoder);
1592 bool is_tvcv = false; 1761 bool is_tvcv = false;
1593 1762
1594 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 1763 if (radeon_encoder->active_device &
1595 /* find tv std */ 1764 (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
1596 if (encoder->crtc == crtc) { 1765 is_tvcv = true;
1597 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1598 if (radeon_encoder->active_device &
1599 (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
1600 is_tvcv = true;
1601 }
1602 }
1603 1766
1604 atombios_crtc_set_pll(crtc, adjusted_mode); 1767 atombios_crtc_set_pll(crtc, adjusted_mode);
1605 1768
@@ -1626,8 +1789,34 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
1626 const struct drm_display_mode *mode, 1789 const struct drm_display_mode *mode,
1627 struct drm_display_mode *adjusted_mode) 1790 struct drm_display_mode *adjusted_mode)
1628{ 1791{
1792 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1793 struct drm_device *dev = crtc->dev;
1794 struct drm_encoder *encoder;
1795
1796 /* assign the encoder to the radeon crtc to avoid repeated lookups later */
1797 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1798 if (encoder->crtc == crtc) {
1799 radeon_crtc->encoder = encoder;
1800 radeon_crtc->connector = radeon_get_connector_for_encoder(encoder);
1801 break;
1802 }
1803 }
1804 if ((radeon_crtc->encoder == NULL) || (radeon_crtc->connector == NULL)) {
1805 radeon_crtc->encoder = NULL;
1806 radeon_crtc->connector = NULL;
1807 return false;
1808 }
1629 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 1809 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
1630 return false; 1810 return false;
1811 if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
1812 return false;
1813 /* pick pll */
1814 radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
1815 /* if we can't get a PPLL for a non-DP encoder, fail */
1816 if ((radeon_crtc->pll_id == ATOM_PPLL_INVALID) &&
1817 !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder)))
1818 return false;
1819
1631 return true; 1820 return true;
1632} 1821}
1633 1822
@@ -1638,8 +1827,6 @@ static void atombios_crtc_prepare(struct drm_crtc *crtc)
1638 struct radeon_device *rdev = dev->dev_private; 1827 struct radeon_device *rdev = dev->dev_private;
1639 1828
1640 radeon_crtc->in_mode_set = true; 1829 radeon_crtc->in_mode_set = true;
1641 /* pick pll */
1642 radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
1643 1830
1644 /* disable crtc pair power gating before programming */ 1831 /* disable crtc pair power gating before programming */
1645 if (ASIC_IS_DCE6(rdev)) 1832 if (ASIC_IS_DCE6(rdev))
@@ -1697,7 +1884,10 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
1697 break; 1884 break;
1698 } 1885 }
1699done: 1886done:
1700 radeon_crtc->pll_id = -1; 1887 radeon_crtc->pll_id = ATOM_PPLL_INVALID;
1888 radeon_crtc->adjusted_clock = 0;
1889 radeon_crtc->encoder = NULL;
1890 radeon_crtc->connector = NULL;
1701} 1891}
1702 1892
1703static const struct drm_crtc_helper_funcs atombios_helper_funcs = { 1893static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
@@ -1746,6 +1936,9 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
1746 else 1936 else
1747 radeon_crtc->crtc_offset = 0; 1937 radeon_crtc->crtc_offset = 0;
1748 } 1938 }
1749 radeon_crtc->pll_id = -1; 1939 radeon_crtc->pll_id = ATOM_PPLL_INVALID;
1940 radeon_crtc->adjusted_clock = 0;
1941 radeon_crtc->encoder = NULL;
1942 radeon_crtc->connector = NULL;
1750 drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); 1943 drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
1751} 1944}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index ea8e2d471c35..d5699fe4f1e8 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -653,9 +653,7 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
653 return false; 653 return false;
654 } 654 }
655 655
656 DRM_DEBUG_KMS("link status %02x %02x %02x %02x %02x %02x\n", 656 DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
657 link_status[0], link_status[1], link_status[2],
658 link_status[3], link_status[4], link_status[5]);
659 return true; 657 return true;
660} 658}
661 659
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 8e2ee98e69d2..49cbb3795a10 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -28,9 +28,251 @@
28#include <drm/radeon_drm.h> 28#include <drm/radeon_drm.h>
29#include "radeon.h" 29#include "radeon.h"
30#include "atom.h" 30#include "atom.h"
31#include <linux/backlight.h>
31 32
32extern int atom_debug; 33extern int atom_debug;
33 34
35static u8
36radeon_atom_get_backlight_level_from_reg(struct radeon_device *rdev)
37{
38 u8 backlight_level;
39 u32 bios_2_scratch;
40
41 if (rdev->family >= CHIP_R600)
42 bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
43 else
44 bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
45
46 backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >>
47 ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
48
49 return backlight_level;
50}
51
52static void
53radeon_atom_set_backlight_level_to_reg(struct radeon_device *rdev,
54 u8 backlight_level)
55{
56 u32 bios_2_scratch;
57
58 if (rdev->family >= CHIP_R600)
59 bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
60 else
61 bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
62
63 bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
64 bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
65 ATOM_S2_CURRENT_BL_LEVEL_MASK);
66
67 if (rdev->family >= CHIP_R600)
68 WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
69 else
70 WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
71}
72
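/*
 * Read-modify-write sketch (assuming the usual 8-bit level field at
 * ATOM_S2_CURRENT_BL_LEVEL_SHIFT): storing level 0x80 first clears the
 * old field, then ORs in (0x80 << ATOM_S2_CURRENT_BL_LEVEL_SHIFT),
 * leaving the other scratch bits untouched.
 */
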
73u8
74atombios_get_backlight_level(struct radeon_encoder *radeon_encoder)
75{
76 struct drm_device *dev = radeon_encoder->base.dev;
77 struct radeon_device *rdev = dev->dev_private;
78
79 if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
80 return 0;
81
82 return radeon_atom_get_backlight_level_from_reg(rdev);
83}
84
85void
+atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
+{
+	struct drm_encoder *encoder = &radeon_encoder->base;
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_atom_dig *dig;
+	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+	int index;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+	    radeon_encoder->enc_priv) {
+		dig = radeon_encoder->enc_priv;
+		dig->backlight_level = level;
+		radeon_atom_set_backlight_level_to_reg(rdev, dig->backlight_level);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+			if (dig->backlight_level == 0) {
+				args.ucAction = ATOM_LCD_BLOFF;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			} else {
+				args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+				args.ucAction = ATOM_LCD_BLON;
+				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			}
+			break;
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			if (dig->backlight_level == 0)
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+			else {
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0);
+				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static u8 radeon_atom_bl_level(struct backlight_device *bd)
+{
+	u8 level;
+
+	/* Convert brightness to hardware level */
+	if (bd->props.brightness < 0)
+		level = 0;
+	else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+		level = RADEON_MAX_BL_LEVEL;
+	else
+		level = bd->props.brightness;
+
+	return level;
+}
+
+static int radeon_atom_backlight_update_status(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+
+	atombios_set_backlight_level(radeon_encoder, radeon_atom_bl_level(bd));
+
+	return 0;
+}
+
+static int radeon_atom_backlight_get_brightness(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	struct radeon_encoder *radeon_encoder = pdata->encoder;
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	return radeon_atom_get_backlight_level_from_reg(rdev);
+}
+
+static const struct backlight_ops radeon_atom_backlight_ops = {
+	.get_brightness = radeon_atom_backlight_get_brightness,
+	.update_status	= radeon_atom_backlight_update_status,
+};
+
+void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+				struct drm_connector *drm_connector)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd;
+	struct backlight_properties props;
+	struct radeon_backlight_privdata *pdata;
+	struct radeon_encoder_atom_dig *dig;
+	u8 backlight_level;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+	if (!rdev->is_atom_bios)
+		return;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
+	if (!pdata) {
+		DRM_ERROR("Memory allocation failed\n");
+		goto error;
+	}
+
+	memset(&props, 0, sizeof(props));
+	props.max_brightness = RADEON_MAX_BL_LEVEL;
+	props.type = BACKLIGHT_RAW;
+	bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+				       pdata, &radeon_atom_backlight_ops, &props);
+	if (IS_ERR(bd)) {
+		DRM_ERROR("Backlight registration failed\n");
+		goto error;
+	}
+
+	pdata->encoder = radeon_encoder;
+
+	backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
+
+	dig = radeon_encoder->enc_priv;
+	dig->bl_dev = bd;
+
+	bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+	bd->props.power = FB_BLANK_UNBLANK;
+	backlight_update_status(bd);
+
+	DRM_INFO("radeon atom DIG backlight initialized\n");
+
+	return;
+
+error:
+	kfree(pdata);
+	return;
+}
+
+static void radeon_atom_backlight_exit(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct backlight_device *bd = NULL;
+	struct radeon_encoder_atom_dig *dig;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+
+	if (!rdev->is_atom_bios)
+		return;
+
+	if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
+		return;
+
+	dig = radeon_encoder->enc_priv;
+	bd = dig->bl_dev;
+	dig->bl_dev = NULL;
+
+	if (bd) {
+		struct radeon_backlight_privdata *pdata;
+
+		pdata = bl_get_data(bd);
+		backlight_device_unregister(bd);
+		kfree(pdata);
+
+		DRM_INFO("radeon atom LVDS backlight unloaded\n");
+	}
+}
+
+#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+void radeon_atom_backlight_init(struct radeon_encoder *encoder,
+				struct drm_connector *drm_connector)
+{
+}
+
+static void radeon_atom_backlight_exit(struct radeon_encoder *encoder)
+{
+}
+
+#endif
+
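The clamp in radeon_atom_bl_level above is the usual pattern for mapping a backlight-class brightness request (which can be out of range) onto a hardware register field. A minimal stand-alone sketch of the same logic, assuming RADEON_MAX_BL_LEVEL is 255 as in the radeon headers:

#include <stdio.h>

#define RADEON_MAX_BL_LEVEL 255	/* assumed; value from radeon headers */

/* Clamp a (possibly negative) brightness request to the 8-bit hw range. */
static unsigned char bl_level(int brightness)
{
	if (brightness < 0)
		return 0;
	if (brightness > RADEON_MAX_BL_LEVEL)
		return RADEON_MAX_BL_LEVEL;
	return (unsigned char)brightness;
}

int main(void)
{
	printf("%u %u %u\n", bl_level(-5), bl_level(128), bl_level(999));
	return 0;	/* prints: 0 128 255 */
}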
 /* evil but including atombios.h is much worse */
 bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
 				struct drm_display_mode *mode);
@@ -209,6 +451,32 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
 
 }
 
+static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	int bpc = 8;
+
+	if (connector)
+		bpc = radeon_get_monitor_bpc(connector);
+
+	switch (bpc) {
+	case 0:
+		return PANEL_BPC_UNDEFINE;
+	case 6:
+		return PANEL_6BIT_PER_COLOR;
+	case 8:
+	default:
+		return PANEL_8BIT_PER_COLOR;
+	case 10:
+		return PANEL_10BIT_PER_COLOR;
+	case 12:
+		return PANEL_12BIT_PER_COLOR;
+	case 16:
+		return PANEL_16BIT_PER_COLOR;
+	}
+}
+
+
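This helper centralizes a bpc-to-PANEL_* mapping that was previously open-coded in three places (see the hunks below). Note that case 8 and default share one branch, so any unrecognized bpc falls back to 8 bits per color. A user-space sketch of the same mapping with the atombios.h constants stubbed out (the stub values are illustrative, not the real encoding):

#include <stdio.h>

/* Stand-ins for the PANEL_* constants from atombios.h; values assumed. */
enum { BPC_UNDEF, BPC_6, BPC_8, BPC_10, BPC_12, BPC_16 };

static int panel_bpc(int bpc)
{
	switch (bpc) {
	case 0:  return BPC_UNDEF;
	case 6:  return BPC_6;
	case 8:
	default: return BPC_8;	/* anything unrecognized falls back to 8 bpc */
	case 10: return BPC_10;
	case 12: return BPC_12;
	case 16: return BPC_16;
	}
}

int main(void)
{
	printf("%d %d\n", panel_bpc(7), panel_bpc(10)); /* 7 -> 8 bpc fallback */
	return 0;
}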
 union dvo_encoder_control {
 	ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
 	DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
@@ -406,7 +674,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 		return ATOM_ENCODER_MODE_DP;
 
 	/* DVO is always DVO */
-	if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
 		return ATOM_ENCODER_MODE_DVO;
 
 	connector = radeon_get_connector_for_encoder(encoder);
@@ -535,7 +804,6 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
 	int dp_clock = 0;
 	int dp_lane_count = 0;
 	int hpd_id = RADEON_HPD_NONE;
-	int bpc = 8;
 
 	if (connector) {
 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -545,7 +813,6 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
 		dp_clock = dig_connector->dp_clock;
 		dp_lane_count = dig_connector->dp_lane_count;
 		hpd_id = radeon_connector->hpd.hpd;
-		bpc = radeon_get_monitor_bpc(connector);
 	}
 
 	/* no dig encoder assigned */
@@ -612,37 +879,17 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
 		else
 			args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
-		if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+		if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode))
 			args.v3.ucLaneNum = dp_lane_count;
 		else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
 			args.v3.ucLaneNum = 8;
 		else
 			args.v3.ucLaneNum = 4;
 
-		if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+		if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
 			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
 		args.v3.acConfig.ucDigSel = dig->dig_encoder;
-		switch (bpc) {
-		case 0:
-			args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
-			break;
-		case 6:
-			args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
-			break;
-		case 8:
-		default:
-			args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
-			break;
-		case 10:
-			args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
-			break;
-		case 12:
-			args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
-			break;
-		case 16:
-			args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
-			break;
-		}
+		args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder);
 		break;
 	case 4:
 		args.v4.ucAction = action;
@@ -652,41 +899,21 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
 		else
 			args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
-		if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+		if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode))
 			args.v4.ucLaneNum = dp_lane_count;
 		else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
 			args.v4.ucLaneNum = 8;
 		else
 			args.v4.ucLaneNum = 4;
 
-		if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) {
+		if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
 			if (dp_clock == 270000)
 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
 			else if (dp_clock == 540000)
 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
 		}
 		args.v4.acConfig.ucDigSel = dig->dig_encoder;
-		switch (bpc) {
-		case 0:
-			args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
-			break;
-		case 6:
-			args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
-			break;
-		case 8:
-		default:
-			args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
-			break;
-		case 10:
-			args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
-			break;
-		case 12:
-			args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
-			break;
-		case 16:
-			args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
-			break;
-		}
+		args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
 		if (hpd_id == RADEON_HPD_NONE)
 			args.v4.ucHPD_ID = 0;
 		else
@@ -799,8 +1026,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 			args.v1.asMode.ucLaneSet = lane_set;
 		} else {
 			if (is_dp)
-				args.v1.usPixelClock =
-					cpu_to_le16(dp_clock / 10);
+				args.v1.usPixelClock = cpu_to_le16(dp_clock / 10);
 			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
 				args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
 			else
@@ -857,8 +1083,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 			args.v2.asMode.ucLaneSet = lane_set;
 		} else {
 			if (is_dp)
-				args.v2.usPixelClock =
-					cpu_to_le16(dp_clock / 10);
+				args.v2.usPixelClock = cpu_to_le16(dp_clock / 10);
 			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
 				args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
 			else
@@ -900,8 +1125,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 			args.v3.asMode.ucLaneSet = lane_set;
 		} else {
 			if (is_dp)
-				args.v3.usPixelClock =
-					cpu_to_le16(dp_clock / 10);
+				args.v3.usPixelClock = cpu_to_le16(dp_clock / 10);
 			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
 				args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
 			else
@@ -960,8 +1184,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 			args.v4.asMode.ucLaneSet = lane_set;
 		} else {
 			if (is_dp)
-				args.v4.usPixelClock =
-					cpu_to_le16(dp_clock / 10);
+				args.v4.usPixelClock = cpu_to_le16(dp_clock / 10);
 			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
 				args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
 			else
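All four transmitter-table versions above program usPixelClock from dp_clock in the DP case. dp_clock is in kHz (162000, 270000, 540000) while the AtomBIOS field takes 10 kHz units, hence the divide by 10; cpu_to_le16 then fixes the byte order for the table interpreter. A sketch of just the unit conversion, under those assumptions:

#include <stdint.h>
#include <stdio.h>

/* dp_clock in kHz; the AtomBIOS usPixelClock field expects 10 kHz units. */
static uint16_t atom_clock_10khz(uint32_t clock_khz)
{
	return (uint16_t)(clock_khz / 10);
}

int main(void)
{
	printf("%u\n", atom_clock_10khz(270000));	/* 27000 -> 2.70 GHz link */
	return 0;
}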
@@ -1147,7 +1370,6 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
 	int dp_lane_count = 0;
 	int connector_object_id = 0;
 	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
-	int bpc = 8;
 
 	if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
 		connector = radeon_get_connector_for_encoder_init(encoder);
@@ -1163,7 +1385,6 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
 		dp_lane_count = dig_connector->dp_lane_count;
 		connector_object_id =
 			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
-		bpc = radeon_get_monitor_bpc(connector);
 	}
 
 	memset(&args, 0, sizeof(args));
@@ -1221,27 +1442,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
 			args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
 			break;
 		}
-		switch (bpc) {
-		case 0:
-			args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
-			break;
-		case 6:
-			args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
-			break;
-		case 8:
-		default:
-			args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
-			break;
-		case 10:
-			args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
-			break;
-		case 12:
-			args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
-			break;
-		case 16:
-			args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
-			break;
-		}
+		args.v3.sExtEncoder.ucBitPerColor = radeon_atom_get_bpc(encoder);
 		break;
 	default:
 		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
@@ -2286,6 +2487,8 @@ static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
 void radeon_enc_destroy(struct drm_encoder *encoder)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+		radeon_atom_backlight_exit(radeon_encoder);
 	kfree(radeon_encoder->enc_priv);
 	drm_encoder_cleanup(encoder);
 	kfree(radeon_encoder);
@@ -2295,7 +2498,7 @@ static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
 	.destroy = radeon_enc_destroy,
 };
 
-struct radeon_encoder_atom_dac *
+static struct radeon_encoder_atom_dac *
 radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
 {
 	struct drm_device *dev = radeon_encoder->base.dev;
@@ -2309,7 +2512,7 @@ radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
 	return dac;
 }
 
-struct radeon_encoder_atom_dig *
+static struct radeon_encoder_atom_dig *
 radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
 {
 	int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index c548dd75ca8b..a1f49c5fd74b 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -37,6 +37,16 @@
 #define EVERGREEN_PFP_UCODE_SIZE 1120
 #define EVERGREEN_PM4_UCODE_SIZE 1376
 
+static const u32 crtc_offsets[6] =
+{
+	EVERGREEN_CRTC0_REGISTER_OFFSET,
+	EVERGREEN_CRTC1_REGISTER_OFFSET,
+	EVERGREEN_CRTC2_REGISTER_OFFSET,
+	EVERGREEN_CRTC3_REGISTER_OFFSET,
+	EVERGREEN_CRTC4_REGISTER_OFFSET,
+	EVERGREEN_CRTC5_REGISTER_OFFSET
+};
+
 static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
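The crtc_offsets table lets per-CRTC registers be addressed as base + offsets[crtc] instead of one switch or one hand-unrolled block per controller, which is what the hunks below replace. The pattern in isolation, with made-up offsets (the real values come from evergreen_reg.h):

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only, not the real EVERGREEN register offsets. */
static const uint32_t crtc_offsets[6] = {
	0x0000, 0x0800, 0x1000, 0x1800, 0x2000, 0x2800
};

#define CRTC_CONTROL 0x6e70	/* a per-CRTC register; address assumed */

/* Caller must bounds-check crtc against the number of controllers. */
static uint32_t crtc_reg(unsigned crtc, uint32_t reg)
{
	return reg + crtc_offsets[crtc];
}

int main(void)
{
	printf("0x%x\n", crtc_reg(2, CRTC_CONTROL));
	return 0;
}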
@@ -105,17 +115,19 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
  */
 void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
 {
-	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
 	int i;
 
-	if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) {
+	if (crtc >= rdev->num_crtc)
+		return;
+
+	if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
 		for (i = 0; i < rdev->usec_timeout; i++) {
-			if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK))
+			if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
 				break;
 			udelay(1);
 		}
 		for (i = 0; i < rdev->usec_timeout; i++) {
-			if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)
+			if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
 				break;
 			udelay(1);
 		}
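dce4_wait_for_vblank polls in two phases: first until the CRTC leaves vblank, then until it re-enters it, so the caller always lands at the start of a fresh blanking interval instead of the tail of the current one. A self-contained simulation of that two-phase poll:

#include <stdbool.h>
#include <stdio.h>

static int pos;	/* fake scanline position driving the status bit */

static bool in_vblank(void)
{
	pos = (pos + 1) % 100;
	return pos >= 90;	/* last 10 "lines" are the blanking interval */
}

static void wait_for_vblank(int timeout)
{
	int i;

	for (i = 0; i < timeout; i++)	/* phase 1: leave the current vblank */
		if (!in_vblank())
			break;
	for (i = 0; i < timeout; i++)	/* phase 2: enter the next one */
		if (in_vblank())
			break;
}

int main(void)
{
	wait_for_vblank(1000);
	printf("now at start of a fresh vblank (pos=%d)\n", pos);
	return 0;
}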
@@ -310,6 +322,64 @@ void sumo_pm_init_profile(struct radeon_device *rdev)
 }
 
 /**
+ * btc_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (BTC, cayman).
+ * Used for profile mode only.
+ */
+void btc_pm_init_profile(struct radeon_device *rdev)
+{
+	int idx;
+
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
+	/* starting with BTC, there is one state that is used for both
+	 * MH and SH.  Difference is that we always use the high clock index for
+	 * mclk.
+	 */
+	if (rdev->flags & RADEON_IS_MOBILITY)
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
+	else
+		idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
+	/* low sh */
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+	/* mid sh */
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
+	/* high sh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
+	/* low mh */
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+	/* mid mh */
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
+	/* high mh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
+}
+
+/**
  * evergreen_pm_misc - set additional pm hw parameters callback.
  *
  * @rdev: radeon_device pointer
@@ -1105,7 +1175,7 @@ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
 	}
 }
 
-int evergreen_pcie_gart_enable(struct radeon_device *rdev)
+static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int r;
@@ -1164,7 +1234,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 	return 0;
 }
 
-void evergreen_pcie_gart_disable(struct radeon_device *rdev)
+static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
 
@@ -1189,7 +1259,7 @@ void evergreen_pcie_gart_disable(struct radeon_device *rdev)
 	radeon_gart_table_vram_unpin(rdev);
 }
 
-void evergreen_pcie_gart_fini(struct radeon_device *rdev)
+static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
 {
 	evergreen_pcie_gart_disable(rdev);
 	radeon_gart_table_vram_free(rdev);
@@ -1197,7 +1267,7 @@ void evergreen_pcie_gart_fini(struct radeon_device *rdev)
 }
 
 
-void evergreen_agp_enable(struct radeon_device *rdev)
+static void evergreen_agp_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
 
@@ -1225,116 +1295,103 @@ void evergreen_agp_enable(struct radeon_device *rdev)
 
 void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
 {
+	u32 crtc_enabled, tmp, frame_count, blackout;
+	int i, j;
+
 	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
 	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
 
-	/* Stop all video */
+	/* disable VGA render */
 	WREG32(VGA_RENDER_CONTROL, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
-	}
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
-	}
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
-	}
-
-	WREG32(D1VGA_CONTROL, 0);
-	WREG32(D2VGA_CONTROL, 0);
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_D3VGA_CONTROL, 0);
-		WREG32(EVERGREEN_D4VGA_CONTROL, 0);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_D5VGA_CONTROL, 0);
-		WREG32(EVERGREEN_D6VGA_CONTROL, 0);
-	}
-}
+	/* blank the display controllers */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+		if (crtc_enabled) {
+			save->crtc_enabled[i] = true;
+			if (ASIC_IS_DCE6(rdev)) {
+				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
+					radeon_wait_for_vblank(rdev, i);
+					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+				}
+			} else {
+				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
+					radeon_wait_for_vblank(rdev, i);
+					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+				}
+			}
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+		}
+	}
+
+	radeon_mc_wait_for_idle(rdev);
+
+	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+	if ((blackout & BLACKOUT_MODE_MASK) != 1) {
+		/* Block CPU access */
+		WREG32(BIF_FB_EN, 0);
+		/* blackout the MC */
+		blackout &= ~BLACKOUT_MODE_MASK;
+		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+	}
+}
 
 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
 {
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-
-	if (rdev->num_crtc >= 4) {
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
-		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
-		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
-		       (u32)rdev->mc.vram_start);
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
-		       (u32)rdev->mc.vram_start);
-
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
-		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
-		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
-		       (u32)rdev->mc.vram_start);
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
-		       (u32)rdev->mc.vram_start);
-	}
-	if (rdev->num_crtc >= 6) {
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
-		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
-		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
-		       (u32)rdev->mc.vram_start);
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
-		       (u32)rdev->mc.vram_start);
-
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
-		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
-		       upper_32_bits(rdev->mc.vram_start));
-		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
-		       (u32)rdev->mc.vram_start);
-		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
-		       (u32)rdev->mc.vram_start);
-	}
-
+	u32 tmp, frame_count;
+	int i, j;
+
+	/* update crtc base addresses */
+	for (i = 0; i < rdev->num_crtc; i++) {
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
+		       (u32)rdev->mc.vram_start);
+	}
+
 	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
 	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
-	/* Unlock host access */
+
+	/* unblackout the MC */
+	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
+	tmp &= ~BLACKOUT_MODE_MASK;
+	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+	/* allow CPU access */
+	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (save->crtc_enabled[i]) {
+			if (ASIC_IS_DCE6(rdev)) {
+				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
+				tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN;
+				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+			} else {
+				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+			}
+			/* wait for the next frame */
+			frame_count = radeon_get_vblank_counter(rdev, i);
+			for (j = 0; j < rdev->usec_timeout; j++) {
+				if (radeon_get_vblank_counter(rdev, i) != frame_count)
+					break;
+				udelay(1);
+			}
+		}
+	}
+	/* Unlock vga access */
 	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
 	mdelay(1);
 	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
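Both mc_stop and mc_resume bracket the MC blackout with a "wait for the next frame" loop: sample the per-CRTC frame counter, then poll until it changes, which guarantees at least one full scanout completed with the new state applied. The loop in isolation, with a fake counter standing in for CRTC_STATUS_FRAME_COUNT:

#include <stdio.h>

static unsigned frame;	/* stand-in for the hardware frame counter */

static unsigned get_frame_count(void)
{
	return ++frame / 16;	/* pretend the counter ticks every 16 polls */
}

/* Returns 0 once a full frame has elapsed, -1 on timeout. */
static int wait_one_frame(unsigned timeout_us)
{
	unsigned i, start = get_frame_count();

	for (i = 0; i < timeout_us; i++) {
		if (get_frame_count() != start)
			return 0;
		/* the driver does udelay(1) here */
	}
	return -1;
}

int main(void)
{
	printf("%d\n", wait_one_frame(1000));	/* prints 0 */
	return 0;
}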
@@ -1553,7 +1610,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
 	return 0;
 }
 
-int evergreen_cp_resume(struct radeon_device *rdev)
+static int evergreen_cp_resume(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 tmp;
@@ -2329,22 +2386,10 @@ int evergreen_asic_reset(struct radeon_device *rdev)
 
 u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
 {
-	switch (crtc) {
-	case 0:
-		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
-	case 1:
-		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
-	case 2:
-		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
-	case 3:
-		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
-	case 4:
-		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
-	case 5:
-		return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
-	default:
+	if (crtc >= rdev->num_crtc)
 		return 0;
-	}
+	else
+		return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
 }
 
 void evergreen_disable_interrupt_state(struct radeon_device *rdev)
@@ -2537,10 +2582,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
 		afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
 	}
-	if (rdev->irq.gui_idle) {
-		DRM_DEBUG("gui idle\n");
-		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
-	}
 
 	if (rdev->family >= CHIP_CAYMAN) {
 		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
@@ -2722,7 +2763,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
 	}
 }
 
-void evergreen_irq_disable(struct radeon_device *rdev)
+static void evergreen_irq_disable(struct radeon_device *rdev)
 {
 	r600_disable_interrupts(rdev);
 	/* Wait and acknowledge irq */
@@ -3075,7 +3116,6 @@ restart_ih:
 			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
-			wake_up(&rdev->irq.idle_queue);
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 101acd618f67..573ed1bc6cf7 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -846,6 +846,16 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
 		return -EINVAL;
 	}
 
+	if (!mipmap) {
+		if (llevel) {
+			dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
+				 __func__, __LINE__);
+			return -EINVAL;
+		} else {
+			return 0; /* everything's ok */
+		}
+	}
+
 	/* check mipmap size */
 	for (i = 1; i <= llevel; i++) {
 		unsigned w, h, d;
@@ -995,7 +1005,7 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if packet is bigger than remaining ib size. or if packets is unknown.
 **/
-int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
+static int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
 			      struct radeon_cs_packet *pkt,
 			      unsigned idx)
 {
@@ -1081,6 +1091,27 @@ static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
 }
 
 /**
+ * evergreen_cs_packet_next_is_pkt3_nop() - test if the next packet is NOP
+ * @p:		structure holding the parser context.
+ *
+ * Check if the next packet is a relocation packet3.
+ **/
+static bool evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet p3reloc;
+	int r;
+
+	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r) {
+		return false;
+	}
+	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+		return false;
+	}
+	return true;
+}
+
+/**
 * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
 * @parser:		parser structure holding parsing context.
 *
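evergreen_cs_packet_next_is_pkt3_nop is a pure look-ahead: it parses the packet header at p->idx without consuming it, so the caller can decide whether a relocation NOP follows. The shape of such a peek, sketched over a plain dword buffer (the header bit layout here is illustrative; the PACKET3_NOP value 0x10 matches evergreend.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed header layout: type in bits 31:30, opcode in bits 15:8. */
#define PKT_TYPE(h)	(((h) >> 30) & 0x3)
#define PKT_OPCODE(h)	(((h) >> 8) & 0xff)
#define PACKET_TYPE3	3
#define PACKET3_NOP	0x10

struct parser {
	const uint32_t *ib;
	unsigned idx, len;
};

/* Peek at the next packet; never advances p->idx. */
static bool next_is_pkt3_nop(const struct parser *p)
{
	uint32_t hdr;

	if (p->idx >= p->len)
		return false;
	hdr = p->ib[p->idx];
	return PKT_TYPE(hdr) == PACKET_TYPE3 && PKT_OPCODE(hdr) == PACKET3_NOP;
}

int main(void)
{
	uint32_t ib[] = { (3u << 30) | (0x10 << 8) };
	struct parser p = { ib, 0, 1 };
	printf("%d\n", next_is_pkt3_nop(&p));	/* 1 */
	return 0;
}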
@@ -2330,7 +2361,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 		for (i = 0; i < (pkt->count / 8); i++) {
 			struct radeon_bo *texture, *mipmap;
 			u32 toffset, moffset;
-			u32 size, offset;
+			u32 size, offset, mip_address, tex_dim;
 
 			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
 			case SQ_TEX_VTX_VALID_TEXTURE:
@@ -2359,14 +2390,28 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 				}
 				texture = reloc->robj;
 				toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+
 				/* tex mip base */
-				r = evergreen_cs_packet_next_reloc(p, &reloc);
-				if (r) {
-					DRM_ERROR("bad SET_RESOURCE (tex)\n");
-					return -EINVAL;
+				tex_dim = ib[idx+1+(i*8)+0] & 0x7;
+				mip_address = ib[idx+1+(i*8)+3];
+
+				if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
+				    !mip_address &&
+				    !evergreen_cs_packet_next_is_pkt3_nop(p)) {
+					/* MIP_ADDRESS should point to FMASK for an MSAA texture.
+					 * It should be 0 if FMASK is disabled. */
+					moffset = 0;
+					mipmap = NULL;
+				} else {
+					r = evergreen_cs_packet_next_reloc(p, &reloc);
+					if (r) {
+						DRM_ERROR("bad SET_RESOURCE (tex)\n");
+						return -EINVAL;
+					}
+					moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+					mipmap = reloc->robj;
 				}
-				moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-				mipmap = reloc->robj;
+
 				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
 				if (r)
 					return r;
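The new SET_RESOURCE handling only consumes a second relocation when one is actually expected: for an MSAA texture whose MIP_ADDRESS is 0 and whose stream does not continue with a relocation NOP, FMASK is disabled and the mipmap is legitimately absent. Reduced to its decision table, a sketch:

#include <stdbool.h>
#include <stdio.h>

/* true  -> consume a mipmap/FMASK relocation from the packet stream
 * false -> treat the mipmap as absent (FMASK disabled)              */
static bool needs_mip_reloc(bool is_msaa, unsigned mip_address,
			    bool next_is_reloc_nop)
{
	if (is_msaa && mip_address == 0 && !next_is_reloc_nop)
		return false;
	return true;
}

int main(void)
{
	printf("%d %d\n",
	       needs_mip_reloc(true, 0, false),	/* 0: MSAA with FMASK off */
	       needs_mip_reloc(false, 0, false));	/* 1: ordinary texture */
	return 0;
}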
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 8beac1065025..034f4c22e5db 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -218,6 +218,8 @@
 #define EVERGREEN_CRTC_CONTROL                          0x6e70
 #       define EVERGREEN_CRTC_MASTER_EN                 (1 << 0)
 #       define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
+#define EVERGREEN_CRTC_BLANK_CONTROL                    0x6e74
+#       define EVERGREEN_CRTC_BLANK_DATA_EN             (1 << 8)
 #define EVERGREEN_CRTC_STATUS                           0x6e8c
 #       define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
 #define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 79347855d9bf..df542f1a5dfb 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -87,6 +87,10 @@
 
 #define	CONFIG_MEMSIZE					0x5428
 
+#define	BIF_FB_EN						0x5490
+#define		FB_READ_EN					(1 << 0)
+#define		FB_WRITE_EN					(1 << 1)
+
 #define	CP_COHER_BASE					0x85F8
 #define	CP_STALLED_STAT1				0x8674
 #define	CP_STALLED_STAT2				0x8678
@@ -430,6 +434,9 @@
 #define		NOOFCHAN_MASK					0x00003000
 #define MC_SHARED_CHREMAP					0x2008
 
+#define MC_SHARED_BLACKOUT_CNTL				0x20ac
+#define		BLACKOUT_MODE_MASK			0x00000007
+
 #define	MC_ARB_RAMCFG					0x2760
 #define		NOOFBANK_SHIFT					0
 #define		NOOFBANK_MASK					0x00000003
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 174462519f15..8bcb554ea0c5 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -726,7 +726,7 @@ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
 	WREG32(VM_INVALIDATE_REQUEST, 1);
 }
 
-int cayman_pcie_gart_enable(struct radeon_device *rdev)
+static int cayman_pcie_gart_enable(struct radeon_device *rdev)
 {
 	int i, r;
 
@@ -782,7 +782,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
 			(u32)(rdev->dummy_page.addr >> 12));
 	WREG32(VM_CONTEXT1_CNTL2, 0);
 	WREG32(VM_CONTEXT1_CNTL, 0);
-	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
 	cayman_pcie_gart_tlb_flush(rdev);
@@ -793,7 +793,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
 	return 0;
 }
 
-void cayman_pcie_gart_disable(struct radeon_device *rdev)
+static void cayman_pcie_gart_disable(struct radeon_device *rdev)
 {
 	/* Disable all tables */
 	WREG32(VM_CONTEXT0_CNTL, 0);
@@ -813,7 +813,7 @@ void cayman_pcie_gart_disable(struct radeon_device *rdev)
 	radeon_gart_table_vram_unpin(rdev);
 }
 
-void cayman_pcie_gart_fini(struct radeon_device *rdev)
+static void cayman_pcie_gart_fini(struct radeon_device *rdev)
 {
 	cayman_pcie_gart_disable(rdev);
 	radeon_gart_table_vram_free(rdev);
@@ -879,12 +879,13 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 #endif
 			  (ib->gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
-	radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
+	radeon_ring_write(ring, ib->length_dw |
+			  (ib->vm ? (ib->vm->id << 24) : 0));
 
 	/* flush read cache over gart for this vmid */
 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
-	radeon_ring_write(ring, ib->vm_id);
+	radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
 	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
 	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
 	radeon_ring_write(ring, 0xFFFFFFFF);
@@ -1004,7 +1005,7 @@ static void cayman_cp_fini(struct radeon_device *rdev)
 	radeon_scratch_free(rdev, ring->rptr_save_reg);
 }
 
-int cayman_cp_resume(struct radeon_device *rdev)
+static int cayman_cp_resume(struct radeon_device *rdev)
 {
 	static const int ridx[] = {
 		RADEON_RING_TYPE_GFX_INDEX,
@@ -1496,53 +1497,16 @@ void cayman_vm_fini(struct radeon_device *rdev)
 {
 }
 
-int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
-{
-	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
-	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
-	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
-	/* flush hdp cache */
-	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-	/* bits 0-7 are the VM contexts0-7 */
-	WREG32(VM_INVALIDATE_REQUEST, 1 << id);
-	return 0;
-}
-
-void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
-	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
-	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
-	/* flush hdp cache */
-	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-	/* bits 0-7 are the VM contexts0-7 */
-	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
-}
-
-void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	if (vm->id == -1)
-		return;
-
-	/* flush hdp cache */
-	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-	/* bits 0-7 are the VM contexts0-7 */
-	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
-}
-
-#define R600_PTE_VALID     (1 << 0)
+#define R600_ENTRY_VALID   (1 << 0)
 #define R600_PTE_SYSTEM    (1 << 1)
 #define R600_PTE_SNOOPED   (1 << 2)
 #define R600_PTE_READABLE  (1 << 5)
 #define R600_PTE_WRITEABLE (1 << 6)
 
-uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
-			      struct radeon_vm *vm,
-			      uint32_t flags)
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
 {
 	uint32_t r600_flags = 0;
-
-	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
 	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
 	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
 	if (flags & RADEON_VM_PAGE_SYSTEM) {
1552 return r600_flags; 1516 return r600_flags;
1553} 1517}
1554 1518
1555void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm, 1519/**
1556 unsigned pfn, uint64_t addr, uint32_t flags) 1520 * cayman_vm_set_page - update the page tables using the CP
1521 *
1522 * @rdev: radeon_device pointer
1523 * @pe: addr of the page entry
1524 * @addr: dst addr to write into pe
1525 * @count: number of page entries to update
1526 * @incr: increase next addr by incr bytes
1527 * @flags: access flags
1528 *
1529 * Update the page tables using the CP (cayman-si).
1530 */
1531void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
1532 uint64_t addr, unsigned count,
1533 uint32_t incr, uint32_t flags)
1534{
1535 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
1536 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
1537 int i;
1538
1539 radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2));
1540 radeon_ring_write(ring, pe);
1541 radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
1542 for (i = 0; i < count; ++i) {
1543 uint64_t value = 0;
1544 if (flags & RADEON_VM_PAGE_SYSTEM) {
1545 value = radeon_vm_map_gart(rdev, addr);
1546 value &= 0xFFFFFFFFFFFFF000ULL;
1547 addr += incr;
1548
1549 } else if (flags & RADEON_VM_PAGE_VALID) {
1550 value = addr;
1551 addr += incr;
1552 }
1553
1554 value |= r600_flags;
1555 radeon_ring_write(ring, value);
1556 radeon_ring_write(ring, upper_32_bits(value));
1557 }
1558}
1559
1560/**
1561 * cayman_vm_flush - vm flush using the CP
1562 *
1563 * @rdev: radeon_device pointer
1564 *
1565 * Update the page table base and flush the VM TLB
1566 * using the CP (cayman-si).
1567 */
1568void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1557{ 1569{
1558 void __iomem *ptr = (void *)vm->pt; 1570 struct radeon_ring *ring = &rdev->ring[ridx];
1571
1572 if (vm == NULL)
1573 return;
1574
1575 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0));
1576 radeon_ring_write(ring, 0);
1577
1578 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0));
1579 radeon_ring_write(ring, vm->last_pfn);
1559 1580
1560 addr = addr & 0xFFFFFFFFFFFFF000ULL; 1581 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
1561 addr |= flags; 1582 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
1562 writeq(addr, ptr + (pfn * 8)); 1583
1584 /* flush hdp cache */
1585 radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
1586 radeon_ring_write(ring, 0x1);
1587
1588 /* bits 0-7 are the VM contexts0-7 */
1589 radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
1590 radeon_ring_write(ring, 1 << vm->id);
1563} 1591}
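cayman_vm_page_flags folds the generic RADEON_VM_PAGE_* flags into the R600 PTE bit layout before cayman_vm_set_page ORs the result into each entry. A stand-alone version of the translation; the PTE bit values are copied from the hunk above, while the RADEON_VM_PAGE_* values are stand-ins:

#include <stdint.h>
#include <stdio.h>

/* Generic flags; values assumed for illustration. */
#define RADEON_VM_PAGE_VALID		(1 << 0)
#define RADEON_VM_PAGE_READABLE		(1 << 1)
#define RADEON_VM_PAGE_WRITEABLE	(1 << 2)
#define RADEON_VM_PAGE_SYSTEM		(1 << 3)
#define RADEON_VM_PAGE_SNOOPED		(1 << 4)

/* R600 PTE bits, as defined in the hunk above. */
#define R600_ENTRY_VALID	(1 << 0)
#define R600_PTE_SYSTEM		(1 << 1)
#define R600_PTE_SNOOPED	(1 << 2)
#define R600_PTE_READABLE	(1 << 5)
#define R600_PTE_WRITEABLE	(1 << 6)

static uint32_t vm_page_flags(uint32_t flags)
{
	uint32_t r600_flags = 0;

	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		r600_flags |= R600_PTE_SYSTEM;
		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return r600_flags;
}

int main(void)
{
	printf("0x%x\n", vm_page_flags(RADEON_VM_PAGE_VALID |
				       RADEON_VM_PAGE_READABLE));	/* 0x21 */
	return 0;
}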
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 870db340d377..2423d1b5d385 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -585,6 +585,7 @@
 #define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
 #define	PACKET3_SET_RESOURCE_INDIRECT			0x74
 #define	PACKET3_SET_APPEND_CNT				0x75
+#define	PACKET3_ME_WRITE				0x7A
 
 #endif
 
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 3183a815f71c..376884f1bcd2 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -79,10 +79,12 @@ MODULE_FIRMWARE(FIRMWARE_R520);
  */
 void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
 {
-	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
 	int i;
 
-	if (radeon_crtc->crtc_id == 0) {
+	if (crtc >= rdev->num_crtc)
+		return;
+
+	if (crtc == 0) {
 		if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
 			for (i = 0; i < rdev->usec_timeout; i++) {
 				if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
@@ -697,9 +699,6 @@ int r100_irq_set(struct radeon_device *rdev)
 	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
 		tmp |= RADEON_SW_INT_ENABLE;
 	}
-	if (rdev->irq.gui_idle) {
-		tmp |= RADEON_GUI_IDLE_MASK;
-	}
 	if (rdev->irq.crtc_vblank_int[0] ||
 	    atomic_read(&rdev->irq.pflip[0])) {
 		tmp |= RADEON_CRTC_VBLANK_MASK;
@@ -736,12 +735,6 @@ static uint32_t r100_irq_ack(struct radeon_device *rdev)
 		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
 		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
 
-	/* the interrupt works, but the status bit is permanently asserted */
-	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
-		if (!rdev->irq.gui_idle_acked)
-			irq_mask |= RADEON_GUI_IDLE_STAT;
-	}
-
 	if (irqs) {
 		WREG32(RADEON_GEN_INT_STATUS, irqs);
 	}
@@ -753,9 +746,6 @@ int r100_irq_process(struct radeon_device *rdev)
 	uint32_t status, msi_rearm;
 	bool queue_hotplug = false;
 
-	/* reset gui idle ack.  the status bit is broken */
-	rdev->irq.gui_idle_acked = false;
-
 	status = r100_irq_ack(rdev);
 	if (!status) {
 		return IRQ_NONE;
@@ -768,11 +758,6 @@ int r100_irq_process(struct radeon_device *rdev)
 	if (status & RADEON_SW_INT_TEST) {
 		radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 	}
-	/* gui idle interrupt */
-	if (status & RADEON_GUI_IDLE_STAT) {
-		rdev->irq.gui_idle_acked = true;
-		wake_up(&rdev->irq.idle_queue);
-	}
 	/* Vertical blank interrupts */
 	if (status & RADEON_CRTC_VBLANK_STAT) {
 		if (rdev->irq.crtc_vblank_int[0]) {
@@ -802,8 +787,6 @@ int r100_irq_process(struct radeon_device *rdev)
 		}
 		status = r100_irq_ack(rdev);
 	}
-	/* reset gui idle ack.  the status bit is broken */
-	rdev->irq.gui_idle_acked = false;
 	if (queue_hotplug)
 		schedule_work(&rdev->hotplug_work);
 	if (rdev->msi_enabled) {
@@ -2529,7 +2512,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
 /*
  * Global GPU functions
  */
-void r100_errata(struct radeon_device *rdev)
+static void r100_errata(struct radeon_device *rdev)
 {
 	rdev->pll_errata = 0;
 
@@ -2544,51 +2527,7 @@ void r100_errata(struct radeon_device *rdev)
 	}
 }
 
-/* Wait for vertical sync on primary CRTC */
-void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
-{
-	uint32_t crtc_gen_cntl, tmp;
-	int i;
-
-	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
-	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
-	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
-		return;
-	}
-	/* Clear the CRTC_VBLANK_SAVE bit */
-	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(RADEON_CRTC_STATUS);
-		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
-			return;
-		}
-		DRM_UDELAY(1);
-	}
-}
-
-/* Wait for vertical sync on secondary CRTC */
-void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
-{
-	uint32_t crtc2_gen_cntl, tmp;
-	int i;
-
-	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
-	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
-	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
-		return;
-
-	/* Clear the CRTC_VBLANK_SAVE bit */
-	WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
-	for (i = 0; i < rdev->usec_timeout; i++) {
-		tmp = RREG32(RADEON_CRTC2_STATUS);
-		if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
-			return;
-		}
-		DRM_UDELAY(1);
-	}
-}
-
-int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
+static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
 {
 	unsigned i;
 	uint32_t tmp;
@@ -2949,7 +2888,7 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
 	WREG32(RADEON_CONFIG_CNTL, temp);
 }
 
-void r100_mc_init(struct radeon_device *rdev)
+static void r100_mc_init(struct radeon_device *rdev)
 {
 	u64 base;
 
@@ -3021,7 +2960,7 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
3021 r100_pll_errata_after_data(rdev); 2960 r100_pll_errata_after_data(rdev);
3022} 2961}
3023 2962
3024void r100_set_safe_registers(struct radeon_device *rdev) 2963static void r100_set_safe_registers(struct radeon_device *rdev)
3025{ 2964{
3026 if (ASIC_IS_RN50(rdev)) { 2965 if (ASIC_IS_RN50(rdev)) {
3027 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; 2966 rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
@@ -3816,9 +3755,10 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3816 return r; 3755 return r;
3817 } 3756 }
3818 WREG32(scratch, 0xCAFEDEAD); 3757 WREG32(scratch, 0xCAFEDEAD);
3819 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256); 3758 r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
3820 if (r) { 3759 if (r) {
3821 return r; 3760 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3761 goto free_scratch;
3822 } 3762 }
3823 ib.ptr[0] = PACKET0(scratch, 0); 3763 ib.ptr[0] = PACKET0(scratch, 0);
3824 ib.ptr[1] = 0xDEADBEEF; 3764 ib.ptr[1] = 0xDEADBEEF;
@@ -3831,13 +3771,13 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3831 ib.length_dw = 8; 3771 ib.length_dw = 8;
3832 r = radeon_ib_schedule(rdev, &ib, NULL); 3772 r = radeon_ib_schedule(rdev, &ib, NULL);
3833 if (r) { 3773 if (r) {
3834 radeon_scratch_free(rdev, scratch); 3774 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3835 radeon_ib_free(rdev, &ib); 3775 goto free_ib;
3836 return r;
3837 } 3776 }
3838 r = radeon_fence_wait(ib.fence, false); 3777 r = radeon_fence_wait(ib.fence, false);
3839 if (r) { 3778 if (r) {
3840 return r; 3779 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3780 goto free_ib;
3841 } 3781 }
3842 for (i = 0; i < rdev->usec_timeout; i++) { 3782 for (i = 0; i < rdev->usec_timeout; i++) {
3843 tmp = RREG32(scratch); 3783 tmp = RREG32(scratch);
@@ -3853,8 +3793,10 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3853 scratch, tmp); 3793 scratch, tmp);
3854 r = -EINVAL; 3794 r = -EINVAL;
3855 } 3795 }
3856 radeon_scratch_free(rdev, scratch); 3796free_ib:
3857 radeon_ib_free(rdev, &ib); 3797 radeon_ib_free(rdev, &ib);
3798free_scratch:
3799 radeon_scratch_free(rdev, scratch);
3858 return r; 3800 return r;
3859} 3801}
3860 3802
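The reworked error handling in r100_ib_test() above follows the kernel's standard goto-unwind idiom: each failure jumps to a label that frees only what was acquired before it, labels release resources in reverse order of acquisition, and the success path falls through the same frees, so every exit shares one cleanup sequence. A minimal self-contained sketch of the pattern; acquire_a(), acquire_b() and use_resources() are hypothetical stand-ins for radeon_scratch_get(), radeon_ib_get() and the schedule/fence-wait steps, not driver functions:

int acquire_a(void);		/* stand-in for radeon_scratch_get() */
int acquire_b(void);		/* stand-in for radeon_ib_get() */
int use_resources(void);	/* stand-in for schedule + fence wait */
void release_a(void);
void release_b(void);

int do_test(void)
{
	int r;

	r = acquire_a();
	if (r)
		return r;		/* nothing to unwind yet */

	r = acquire_b();
	if (r)
		goto free_a;		/* undo only the first step */

	r = use_resources();
	if (r)
		goto free_b;

	/* ... inspect results, possibly set r = -EINVAL ... */

free_b:
	release_b();
free_a:
	release_a();
	return r;			/* success and failure converge here */
}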
@@ -3963,7 +3905,7 @@ static void r100_mc_program(struct radeon_device *rdev)
 	r100_mc_resume(rdev, &save);
 }
 
-void r100_clock_startup(struct radeon_device *rdev)
+static void r100_clock_startup(struct radeon_device *rdev)
 {
 	u32 tmp;
 
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 1e10df214271..d0ba6023a1f8 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -296,7 +296,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
 	radeon_ring_unlock_commit(rdev, ring);
 }
 
-void r300_errata(struct radeon_device *rdev)
+static void r300_errata(struct radeon_device *rdev)
 {
 	rdev->pll_errata = 0;
 
@@ -322,7 +322,7 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
 	return -1;
 }
 
-void r300_gpu_init(struct radeon_device *rdev)
+static void r300_gpu_init(struct radeon_device *rdev)
 {
 	uint32_t gb_tile_config, tmp;
 
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 90703d539e04..f795a4e092cb 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -119,7 +119,7 @@ static void r520_vram_get_type(struct radeon_device *rdev)
 		rdev->mc.vram_width *= 2;
 }
 
-void r520_mc_init(struct radeon_device *rdev)
+static void r520_mc_init(struct radeon_device *rdev)
 {
 
 	r520_vram_get_type(rdev);
@@ -131,7 +131,7 @@ void r520_mc_init(struct radeon_device *rdev)
 	radeon_update_bandwidth_info(rdev);
 }
 
-void r520_mc_program(struct radeon_device *rdev)
+static void r520_mc_program(struct radeon_device *rdev)
 {
 	struct rv515_mc_save save;
 
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9f2cafd10f4a..70c800ff6190 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -98,7 +98,7 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
 
 /* r600,rv610,rv630,rv620,rv635,rv670 */
 int r600_mc_wait_for_idle(struct radeon_device *rdev);
-void r600_gpu_init(struct radeon_device *rdev);
+static void r600_gpu_init(struct radeon_device *rdev);
 void r600_fini(struct radeon_device *rdev);
 void r600_irq_disable(struct radeon_device *rdev);
 static void r600_pcie_gen2_enable(struct radeon_device *rdev);
@@ -881,7 +881,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
 	return radeon_gart_table_vram_alloc(rdev);
 }
 
-int r600_pcie_gart_enable(struct radeon_device *rdev)
+static int r600_pcie_gart_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int r, i;
@@ -938,7 +938,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
 	return 0;
 }
 
-void r600_pcie_gart_disable(struct radeon_device *rdev)
+static void r600_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int i;
@@ -971,14 +971,14 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
 	radeon_gart_table_vram_unpin(rdev);
 }
 
-void r600_pcie_gart_fini(struct radeon_device *rdev)
+static void r600_pcie_gart_fini(struct radeon_device *rdev)
 {
 	radeon_gart_fini(rdev);
 	r600_pcie_gart_disable(rdev);
 	radeon_gart_table_vram_free(rdev);
 }
 
-void r600_agp_enable(struct radeon_device *rdev)
+static void r600_agp_enable(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int i;
@@ -1158,7 +1158,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
 	}
 }
 
-int r600_mc_init(struct radeon_device *rdev)
+static int r600_mc_init(struct radeon_device *rdev)
 {
 	u32 tmp;
 	int chansize, numchan;
@@ -1258,7 +1258,7 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
  * reset, it's up to the caller to determine if the GPU needs one. We
  * might add an helper function to check that.
  */
-int r600_gpu_soft_reset(struct radeon_device *rdev)
+static int r600_gpu_soft_reset(struct radeon_device *rdev)
 {
 	struct rv515_mc_save save;
 	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
@@ -1433,7 +1433,7 @@ int r600_count_pipe_bits(uint32_t val)
 	return ret;
 }
 
-void r600_gpu_init(struct radeon_device *rdev)
+static void r600_gpu_init(struct radeon_device *rdev)
 {
 	u32 tiling_config;
 	u32 ramcfg;
@@ -2347,7 +2347,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
 	/* FIXME: implement */
 }
 
-int r600_startup(struct radeon_device *rdev)
+static int r600_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
@@ -2635,10 +2635,10 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ib_get(rdev, ring->idx, &ib, 256);
+	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
 	if (r) {
 		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
-		return r;
+		goto free_scratch;
 	}
 	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
 	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -2646,15 +2646,13 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 	ib.length_dw = 3;
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
-		radeon_scratch_free(rdev, scratch);
-		radeon_ib_free(rdev, &ib);
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
-		return r;
+		goto free_ib;
 	}
 	r = radeon_fence_wait(ib.fence, false);
 	if (r) {
 		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
-		return r;
+		goto free_ib;
 	}
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
@@ -2669,8 +2667,10 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
 			  scratch, tmp);
 		r = -EINVAL;
 	}
-	radeon_scratch_free(rdev, scratch);
+free_ib:
 	radeon_ib_free(rdev, &ib);
+free_scratch:
+	radeon_scratch_free(rdev, scratch);
 	return r;
 }
 
@@ -3088,10 +3088,6 @@ int r600_irq_set(struct radeon_device *rdev)
 		DRM_DEBUG("r600_irq_set: hdmi 0\n");
 		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
 	}
-	if (rdev->irq.gui_idle) {
-		DRM_DEBUG("gui idle\n");
-		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
-	}
 
 	WREG32(CP_INT_CNTL, cp_int_cntl);
 	WREG32(DxMODE_INT_MASK, mode_int);
@@ -3475,7 +3471,6 @@ restart_ih:
 			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
-			wake_up(&rdev->irq.idle_queue);
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 26ace5623dc7..77da1f9c0b8e 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -488,31 +488,36 @@ set_default_state(drm_radeon_private_t *dev_priv)
 	ADVANCE_RING();
 }
 
-static uint32_t i2f(uint32_t input)
+/* 23 bits of float fractional data */
+#define I2F_FRAC_BITS  23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts unsigned integer into 32-bit IEEE floating point representation.
+ * Will be exact from 0 to 2^24. Above that, we round towards zero
+ * as the fractional bits will not fit in a float. (It would be better to
+ * round towards even as the fpu does, but that is slower.)
+ */
+__pure uint32_t int2float(uint32_t x)
 {
-	u32 result, i, exponent, fraction;
+	uint32_t msb, exponent, fraction;
 
-	if ((input & 0x3fff) == 0)
-		result = 0; /* 0 is a special case */
-	else {
-		exponent = 140; /* exponent biased by 127; */
-		fraction = (input & 0x3fff) << 10; /* cheat and only
-						      handle numbers below 2^^15 */
-		for (i = 0; i < 14; i++) {
-			if (fraction & 0x800000)
-				break;
-			else {
-				fraction = fraction << 1; /* keep
-							     shifting left until top bit = 1 */
-				exponent = exponent - 1;
-			}
-		}
-		result = exponent << 23 | (fraction & 0x7fffff); /* mask
-								    off top bit; assumed 1 */
-	}
-	return result;
-}
+	/* Zero is special */
+	if (!x) return 0;
+
+	/* Get location of the most significant bit */
+	msb = __fls(x);
 
+	/*
+	 * Use a rotate instead of a shift because that works both leftwards
+	 * and rightwards due to the mod(32) behaviour. This means we don't
+	 * need to check to see if we are above 2^24 or not.
+	 */
+	fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+	exponent = (127 + msb) << I2F_FRAC_BITS;
+
+	return fraction + exponent;
+}
 
 static int r600_nomm_get_vb(struct drm_device *dev)
 {
@@ -631,20 +636,20 @@ r600_blit_copy(struct drm_device *dev,
 			vb = r600_nomm_get_vb_ptr(dev);
 		}
 
-		vb[0] = i2f(dst_x);
+		vb[0] = int2float(dst_x);
 		vb[1] = 0;
-		vb[2] = i2f(src_x);
+		vb[2] = int2float(src_x);
 		vb[3] = 0;
 
-		vb[4] = i2f(dst_x);
-		vb[5] = i2f(h);
-		vb[6] = i2f(src_x);
-		vb[7] = i2f(h);
+		vb[4] = int2float(dst_x);
+		vb[5] = int2float(h);
+		vb[6] = int2float(src_x);
+		vb[7] = int2float(h);
 
-		vb[8] = i2f(dst_x + cur_size);
-		vb[9] = i2f(h);
-		vb[10] = i2f(src_x + cur_size);
-		vb[11] = i2f(h);
+		vb[8] = int2float(dst_x + cur_size);
+		vb[9] = int2float(h);
+		vb[10] = int2float(src_x + cur_size);
+		vb[11] = int2float(h);
 
 		/* src */
 		set_tex_resource(dev_priv, FMT_8,
@@ -720,20 +725,20 @@ r600_blit_copy(struct drm_device *dev,
 			vb = r600_nomm_get_vb_ptr(dev);
 		}
 
-		vb[0] = i2f(dst_x / 4);
+		vb[0] = int2float(dst_x / 4);
 		vb[1] = 0;
-		vb[2] = i2f(src_x / 4);
+		vb[2] = int2float(src_x / 4);
 		vb[3] = 0;
 
-		vb[4] = i2f(dst_x / 4);
-		vb[5] = i2f(h);
-		vb[6] = i2f(src_x / 4);
-		vb[7] = i2f(h);
+		vb[4] = int2float(dst_x / 4);
+		vb[5] = int2float(h);
+		vb[6] = int2float(src_x / 4);
+		vb[7] = int2float(h);
 
-		vb[8] = i2f((dst_x + cur_size) / 4);
-		vb[9] = i2f(h);
-		vb[10] = i2f((src_x + cur_size) / 4);
-		vb[11] = i2f(h);
+		vb[8] = int2float((dst_x + cur_size) / 4);
+		vb[9] = int2float(h);
+		vb[10] = int2float((src_x + cur_size) / 4);
+		vb[11] = int2float(h);
 
 		/* src */
 		set_tex_resource(dev_priv, FMT_8_8_8_8,
@@ -803,20 +808,20 @@ r600_blit_swap(struct drm_device *dev,
 	dx2 = dx + w;
 	dy2 = dy + h;
 
-	vb[0] = i2f(dx);
-	vb[1] = i2f(dy);
-	vb[2] = i2f(sx);
-	vb[3] = i2f(sy);
+	vb[0] = int2float(dx);
+	vb[1] = int2float(dy);
+	vb[2] = int2float(sx);
+	vb[3] = int2float(sy);
 
-	vb[4] = i2f(dx);
-	vb[5] = i2f(dy2);
-	vb[6] = i2f(sx);
-	vb[7] = i2f(sy2);
+	vb[4] = int2float(dx);
+	vb[5] = int2float(dy2);
+	vb[6] = int2float(sx);
+	vb[7] = int2float(sy2);
 
-	vb[8] = i2f(dx2);
-	vb[9] = i2f(dy2);
-	vb[10] = i2f(sx2);
-	vb[11] = i2f(sy2);
+	vb[8] = int2float(dx2);
+	vb[9] = int2float(dy2);
+	vb[10] = int2float(sx2);
+	vb[11] = int2float(sy2);
 
 	switch(cpp) {
 	case 4:
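The replacement int2float() above handles the full 32-bit range instead of only inputs below 2^15. __fls() gives the bit index of the most significant set bit; rotating right by (msb - 23) mod 32 moves that bit to position 23 whether msb is above 23 (a real right shift that truncates low bits, rounding toward zero) or below it (effectively a left rotate), and the 23-bit mask drops the implicit leading one. Since the masked fraction can never carry into bit 23, adding it to the biased exponent (127 + msb) << 23 produces the IEEE-754 encoding directly. A userspace sketch (not kernel code; __fls and ror32 are reimplemented here as assumptions about their kernel semantics) that checks the logic against the compiler's own conversion over the exactly representable range:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static unsigned msb_index(uint32_t x)	/* like the kernel's __fls() */
{
	unsigned m = 0;
	while (x >>= 1)
		m++;
	return m;
}

static uint32_t ror32(uint32_t w, unsigned s)
{
	s &= 31;
	return s ? (w >> s) | (w << (32 - s)) : w;
}

static uint32_t int2float(uint32_t x)
{
	uint32_t msb, exponent, fraction;

	if (!x)
		return 0;
	msb = msb_index(x);
	/* rotate the leading 1 to bit 23, drop it with the mask */
	fraction = ror32(x, (msb - 23) & 0x1f) & ((1u << 23) - 1);
	exponent = (127 + msb) << 23;	/* biased exponent; sign is 0 */
	return fraction + exponent;
}

int main(void)
{
	uint32_t x, bits;
	float f;

	for (x = 0; x <= (1u << 16); x++) {
		f = (float)x;
		memcpy(&bits, &f, sizeof(bits));
		assert(int2float(x) == bits);	/* exact below 2^24 */
	}
	return 0;
}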
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index aec8487662c4..e082dca6feee 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -454,46 +454,6 @@ set_default_state(struct radeon_device *rdev)
 	radeon_ring_write(ring, sq_stack_resource_mgmt_2);
 }
 
-#define I2F_MAX_BITS 15
-#define I2F_MAX_INPUT  ((1 << I2F_MAX_BITS) - 1)
-#define I2F_SHIFT (24 - I2F_MAX_BITS)
-
-/*
- * Converts unsigned integer into 32-bit IEEE floating point representation.
- * Conversion is not universal and only works for the range from 0
- * to 2^I2F_MAX_BITS-1. Currently we only use it with inputs between
- * 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary,
- * I2F_MAX_BITS can be increased, but that will add to the loop iterations
- * and slow us down. Conversion is done by shifting the input and counting
- * down until the first 1 reaches bit position 23. The resulting counter
- * and the shifted input are, respectively, the exponent and the fraction.
- * The sign is always zero.
- */
-static uint32_t i2f(uint32_t input)
-{
-	u32 result, i, exponent, fraction;
-
-	WARN_ON_ONCE(input > I2F_MAX_INPUT);
-
-	if ((input & I2F_MAX_INPUT) == 0)
-		result = 0;
-	else {
-		exponent = 126 + I2F_MAX_BITS;
-		fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT;
-
-		for (i = 0; i < I2F_MAX_BITS; i++) {
-			if (fraction & 0x800000)
-				break;
-			else {
-				fraction = fraction << 1;
-				exponent = exponent - 1;
-			}
-		}
-		result = exponent << 23 | (fraction & 0x7fffff);
-	}
-	return result;
-}
-
 int r600_blit_init(struct radeon_device *rdev)
 {
 	u32 obj_size;
@@ -765,14 +725,14 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
 	vb_cpu_addr[3] = 0;
 
 	vb_cpu_addr[4] = 0;
-	vb_cpu_addr[5] = i2f(h);
+	vb_cpu_addr[5] = int2float(h);
 	vb_cpu_addr[6] = 0;
-	vb_cpu_addr[7] = i2f(h);
+	vb_cpu_addr[7] = int2float(h);
 
-	vb_cpu_addr[8] = i2f(w);
-	vb_cpu_addr[9] = i2f(h);
-	vb_cpu_addr[10] = i2f(w);
-	vb_cpu_addr[11] = i2f(h);
+	vb_cpu_addr[8] = int2float(w);
+	vb_cpu_addr[9] = int2float(h);
+	vb_cpu_addr[10] = int2float(w);
+	vb_cpu_addr[11] = int2float(h);
 
 	rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
 						    w, h, w, src_gpu_addr, size_in_bytes);
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index f437d36dd98c..2f3ce7a75976 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -35,4 +35,5 @@ extern const u32 r6xx_default_state[];
 extern const u32 r6xx_ps_size, r6xx_vs_size;
 extern const u32 r6xx_default_size, r7xx_default_size;
 
+__pure uint32_t int2float(uint32_t x);
 #endif
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index cb92646a5e55..211c40252fe0 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -847,7 +847,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
  * Assume that chunk_ib_index is properly set. Will return -EINVAL
  * if packet is bigger than remaining ib size. or if packets is unknown.
  **/
-int r600_cs_packet_parse(struct radeon_cs_parser *p,
+static int r600_cs_packet_parse(struct radeon_cs_parser *p,
 			struct radeon_cs_packet *pkt,
 			unsigned idx)
 {
@@ -2180,7 +2180,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
 		}
 		break;
 	case PACKET3_STRMOUT_BASE_UPDATE:
-		if (p->family < CHIP_RV770) {
+		/* RS780 and RS880 also need this */
+		if (p->family < CHIP_RS780) {
 			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
 			return -EINVAL;
 		}
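The one-line fix above works because enum radeon_family is ordered by generation and the comparison is numeric: the old `p->family < CHIP_RV770` cutoff rejected the RS780/RS880 IGPs, which must therefore sit below CHIP_RV770 in the enum, while the new `< CHIP_RS780` bound admits both IGPs plus every later 7xx part. An abridged sketch of the ordering the comparison relies on (illustrative only; intermediate members are omitted and radeon_family.h is authoritative):

/* Abridged, illustrative ordering; see radeon_family.h for the real enum. */
enum radeon_family_sketch {
	CHIP_R600_SKETCH,	/* r6xx discrete parts ... */
	CHIP_RV670_SKETCH,
	CHIP_RS780_SKETCH,	/* IGP: passes the new < CHIP_RS780 check */
	CHIP_RS880_SKETCH,	/* IGP: likewise */
	CHIP_RV770_SKETCH,	/* first chip the old check accepted */
	/* ... later 7xx parts ... */
};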
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 23be9319c729..ff80efe9cb7d 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -53,7 +53,7 @@ enum r600_hdmi_iec_status_bits {
 	AUDIO_STATUS_LEVEL = 0x80
 };
 
-struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
+static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
     /*       32kHz        44.1kHz       48kHz    */
     /* Clock      N     CTS      N     CTS      N     CTS */
     {  25174,  4576,  28125,  7007,  31250,  6864,  28125 }, /*  25,20/1.001 MHz */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 59a15315ae9f..b04c06444d8b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -123,6 +123,7 @@ extern int radeon_lockup_timeout;
 #define CAYMAN_RING_TYPE_CP2_INDEX	2
 
 /* hardcode those limit for now */
+#define RADEON_VA_IB_OFFSET		(1 << 20)
 #define RADEON_VA_RESERVED_SIZE		(8 << 20)
 #define RADEON_IB_VM_MAX_SIZE		(64 << 10)
 
@@ -253,6 +254,22 @@ static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
 	}
 }
 
+static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
+					   struct radeon_fence *b)
+{
+	if (!a) {
+		return false;
+	}
+
+	if (!b) {
+		return true;
+	}
+
+	BUG_ON(a->ring != b->ring);
+
+	return a->seq < b->seq;
+}
+
 /*
  * Tiling registers
  */
@@ -275,18 +292,20 @@ struct radeon_mman {
 
 /* bo virtual address in a specific vm */
 struct radeon_bo_va {
-	/* bo list is protected by bo being reserved */
+	/* protected by bo being reserved */
 	struct list_head		bo_list;
-	/* vm list is protected by vm mutex */
-	struct list_head		vm_list;
-	/* constant after initialization */
-	struct radeon_vm		*vm;
-	struct radeon_bo		*bo;
 	uint64_t			soffset;
 	uint64_t			eoffset;
 	uint32_t			flags;
-	struct radeon_fence		*fence;
 	bool				valid;
+	unsigned			ref_count;
+
+	/* protected by vm mutex */
+	struct list_head		vm_list;
+
+	/* constant after initialization */
+	struct radeon_vm		*vm;
+	struct radeon_bo		*bo;
 };
 
 struct radeon_bo {
@@ -566,9 +585,6 @@ struct radeon_irq {
 	atomic_t			pflip[RADEON_MAX_CRTCS];
 	wait_queue_head_t		vblank_queue;
 	bool				hpd[RADEON_MAX_HPD_PINS];
-	bool				gui_idle;
-	bool				gui_idle_acked;
-	wait_queue_head_t		idle_queue;
 	bool				afmt[RADEON_MAX_AFMT_BLOCKS];
 	union radeon_irq_stat_regs	stat_regs;
 };
@@ -583,7 +599,6 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
 void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
 void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
 void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
-int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev);
 
 /*
  * CP & rings.
@@ -596,7 +611,7 @@ struct radeon_ib {
 	uint32_t		*ptr;
 	int			ring;
 	struct radeon_fence	*fence;
-	unsigned		vm_id;
+	struct radeon_vm	*vm;
 	bool			is_const_ib;
 	struct radeon_fence	*sync_to[RADEON_NUM_RINGS];
 	struct radeon_semaphore	*semaphore;
@@ -632,41 +647,38 @@ struct radeon_ring {
 /*
  * VM
  */
+
+/* maximum number of VMIDs */
+#define RADEON_NUM_VM	16
+
+/* defines number of bits in page table versus page directory,
+ * a page is 4KB so we have 12 bits offset, 9 bits in the page
+ * table and the remaining 19 bits are in the page directory */
+#define RADEON_VM_BLOCK_SIZE	9
+
+/* number of entries in page table */
+#define RADEON_VM_PTE_COUNT	(1 << RADEON_VM_BLOCK_SIZE)
+
 struct radeon_vm {
 	struct list_head		list;
 	struct list_head		va;
-	int				id;
+	unsigned			id;
 	unsigned			last_pfn;
-	u64				pt_gpu_addr;
-	u64				*pt;
+	u64				pd_gpu_addr;
 	struct radeon_sa_bo		*sa_bo;
 	struct mutex			mutex;
 	/* last fence for cs using this vm */
 	struct radeon_fence		*fence;
-};
-
-struct radeon_vm_funcs {
-	int (*init)(struct radeon_device *rdev);
-	void (*fini)(struct radeon_device *rdev);
-	/* cs mutex must be lock for schedule_ib */
-	int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
-	void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
-	void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
-	uint32_t (*page_flags)(struct radeon_device *rdev,
-			       struct radeon_vm *vm,
-			       uint32_t flags);
-	void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
-			 unsigned pfn, uint64_t addr, uint32_t flags);
+	/* last flush or NULL if we still need to flush */
+	struct radeon_fence		*last_flush;
 };
 
 struct radeon_vm_manager {
 	struct mutex			lock;
 	struct list_head		lru_vm;
-	uint32_t			use_bitmap;
+	struct radeon_fence		*active[RADEON_NUM_VM];
 	struct radeon_sa_manager	sa_manager;
 	uint32_t			max_pfn;
-	/* fields constant after init */
-	const struct radeon_vm_funcs	*funcs;
 	/* number of VMIDs */
 	unsigned			nvm;
 	/* vram base address for page table entry */
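The constants above imply a two-level translation: 4KB pages leave a 12-bit byte offset, RADEON_VM_BLOCK_SIZE = 9 bits select one of the 512 entries in a page table (so each table spans 512 * 4KB = 2MB), and the remaining high bits of the GPU virtual address index the page directory. A standalone sketch of the index arithmetic (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

#define VM_BLOCK_SIZE	9
#define VM_PTE_COUNT	(1 << VM_BLOCK_SIZE)	/* 512 PTEs per table */

int main(void)
{
	uint64_t va = 0x12345678ull;			/* example GPU VA */
	uint64_t offset  = va & 0xfff;			/* low 12 bits, 4KB page */
	uint64_t pte_idx = (va >> 12) & (VM_PTE_COUNT - 1);
	uint64_t pde_idx = va >> (12 + VM_BLOCK_SIZE);	/* remaining high bits */

	printf("pde %llu, pte %llu, offset 0x%llx\n",
	       (unsigned long long)pde_idx,
	       (unsigned long long)pte_idx,
	       (unsigned long long)offset);
	return 0;
}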
@@ -738,7 +750,8 @@ struct si_rlc {
738}; 750};
739 751
740int radeon_ib_get(struct radeon_device *rdev, int ring, 752int radeon_ib_get(struct radeon_device *rdev, int ring,
741 struct radeon_ib *ib, unsigned size); 753 struct radeon_ib *ib, struct radeon_vm *vm,
754 unsigned size);
742void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); 755void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
743int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, 756int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
744 struct radeon_ib *const_ib); 757 struct radeon_ib *const_ib);
@@ -1131,6 +1144,15 @@ struct radeon_asic {
1131 void (*tlb_flush)(struct radeon_device *rdev); 1144 void (*tlb_flush)(struct radeon_device *rdev);
1132 int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr); 1145 int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
1133 } gart; 1146 } gart;
1147 struct {
1148 int (*init)(struct radeon_device *rdev);
1149 void (*fini)(struct radeon_device *rdev);
1150
1151 u32 pt_ring_index;
1152 void (*set_page)(struct radeon_device *rdev, uint64_t pe,
1153 uint64_t addr, unsigned count,
1154 uint32_t incr, uint32_t flags);
1155 } vm;
1134 /* ring specific callbacks */ 1156 /* ring specific callbacks */
1135 struct { 1157 struct {
1136 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); 1158 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -1143,6 +1165,7 @@ struct radeon_asic {
1143 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp); 1165 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1144 int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp); 1166 int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1145 bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp); 1167 bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
1168 void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
1146 } ring[RADEON_NUM_RINGS]; 1169 } ring[RADEON_NUM_RINGS];
1147 /* irqs */ 1170 /* irqs */
1148 struct { 1171 struct {
@@ -1157,6 +1180,10 @@ struct radeon_asic {
1157 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); 1180 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
1158 /* wait for vblank */ 1181 /* wait for vblank */
1159 void (*wait_for_vblank)(struct radeon_device *rdev, int crtc); 1182 void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
1183 /* set backlight level */
1184 void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
1185 /* get backlight level */
1186 u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
1160 } display; 1187 } display;
1161 /* copy functions for bo handling */ 1188 /* copy functions for bo handling */
1162 struct { 1189 struct {
@@ -1428,6 +1455,56 @@ struct r600_vram_scratch {
1428 u64 gpu_addr; 1455 u64 gpu_addr;
1429}; 1456};
1430 1457
1458/*
1459 * ACPI
1460 */
1461struct radeon_atif_notification_cfg {
1462 bool enabled;
1463 int command_code;
1464};
1465
1466struct radeon_atif_notifications {
1467 bool display_switch;
1468 bool expansion_mode_change;
1469 bool thermal_state;
1470 bool forced_power_state;
1471 bool system_power_state;
1472 bool display_conf_change;
1473 bool px_gfx_switch;
1474 bool brightness_change;
1475 bool dgpu_display_event;
1476};
1477
1478struct radeon_atif_functions {
1479 bool system_params;
1480 bool sbios_requests;
1481 bool select_active_disp;
1482 bool lid_state;
1483 bool get_tv_standard;
1484 bool set_tv_standard;
1485 bool get_panel_expansion_mode;
1486 bool set_panel_expansion_mode;
1487 bool temperature_change;
1488 bool graphics_device_types;
1489};
1490
1491struct radeon_atif {
1492 struct radeon_atif_notifications notifications;
1493 struct radeon_atif_functions functions;
1494 struct radeon_atif_notification_cfg notification_cfg;
1495 struct radeon_encoder *encoder_for_bl;
1496};
1497
1498struct radeon_atcs_functions {
1499 bool get_ext_state;
1500 bool pcie_perf_req;
1501 bool pcie_dev_rdy;
1502 bool pcie_bus_width;
1503};
1504
1505struct radeon_atcs {
1506 struct radeon_atcs_functions functions;
1507};
1431 1508
1432/* 1509/*
1433 * Core structure, functions and helpers. 1510 * Core structure, functions and helpers.
@@ -1520,6 +1597,9 @@ struct radeon_device {
1520 /* virtual memory */ 1597 /* virtual memory */
1521 struct radeon_vm_manager vm_manager; 1598 struct radeon_vm_manager vm_manager;
1522 struct mutex gpu_clock_mutex; 1599 struct mutex gpu_clock_mutex;
1600 /* ACPI interface */
1601 struct radeon_atif atif;
1602 struct radeon_atcs atcs;
1523}; 1603};
1524 1604
1525int radeon_device_init(struct radeon_device *rdev, 1605int radeon_device_init(struct radeon_device *rdev,
@@ -1683,15 +1763,21 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1683#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) 1763#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
1684#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) 1764#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
1685#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p)) 1765#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
1766#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
1767#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
1768#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
1686#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp)) 1769#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
1687#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp)) 1770#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
1688#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp)) 1771#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
1689#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib)) 1772#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
1690#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib)) 1773#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
1691#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp)) 1774#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
1775#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
1692#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev)) 1776#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
1693#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev)) 1777#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
1694#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc)) 1778#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
1779#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
1780#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
1695#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence)) 1781#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
1696#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) 1782#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
1697#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) 1783#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
@@ -1759,22 +1845,30 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
1759void radeon_vm_manager_fini(struct radeon_device *rdev); 1845void radeon_vm_manager_fini(struct radeon_device *rdev);
1760int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); 1846int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
1761void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); 1847void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
1762int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm); 1848int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
1763void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm); 1849struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
1850 struct radeon_vm *vm, int ring);
1851void radeon_vm_fence(struct radeon_device *rdev,
1852 struct radeon_vm *vm,
1853 struct radeon_fence *fence);
1854uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
1764int radeon_vm_bo_update_pte(struct radeon_device *rdev, 1855int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1765 struct radeon_vm *vm, 1856 struct radeon_vm *vm,
1766 struct radeon_bo *bo, 1857 struct radeon_bo *bo,
1767 struct ttm_mem_reg *mem); 1858 struct ttm_mem_reg *mem);
1768void radeon_vm_bo_invalidate(struct radeon_device *rdev, 1859void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1769 struct radeon_bo *bo); 1860 struct radeon_bo *bo);
1770int radeon_vm_bo_add(struct radeon_device *rdev, 1861struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
1771 struct radeon_vm *vm, 1862 struct radeon_bo *bo);
1772 struct radeon_bo *bo, 1863struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
1773 uint64_t offset, 1864 struct radeon_vm *vm,
1774 uint32_t flags); 1865 struct radeon_bo *bo);
1866int radeon_vm_bo_set_addr(struct radeon_device *rdev,
1867 struct radeon_bo_va *bo_va,
1868 uint64_t offset,
1869 uint32_t flags);
1775int radeon_vm_bo_rmv(struct radeon_device *rdev, 1870int radeon_vm_bo_rmv(struct radeon_device *rdev,
1776 struct radeon_vm *vm, 1871 struct radeon_bo_va *bo_va);
1777 struct radeon_bo *bo);
1778 1872
1779/* audio */ 1873/* audio */
1780void r600_audio_update_hdmi(struct work_struct *work); 1874void r600_audio_update_hdmi(struct work_struct *work);
@@ -1832,12 +1926,14 @@ extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_displ
1832extern int ni_init_microcode(struct radeon_device *rdev); 1926extern int ni_init_microcode(struct radeon_device *rdev);
1833extern int ni_mc_load_microcode(struct radeon_device *rdev); 1927extern int ni_mc_load_microcode(struct radeon_device *rdev);
1834 1928
1835/* radeon_acpi.c */ 1929/* radeon_acpi.c */
1836#if defined(CONFIG_ACPI) 1930#if defined(CONFIG_ACPI)
1837extern int radeon_acpi_init(struct radeon_device *rdev); 1931extern int radeon_acpi_init(struct radeon_device *rdev);
1838#else 1932extern void radeon_acpi_fini(struct radeon_device *rdev);
1839static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; } 1933#else
1840#endif 1934static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
1935static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
1936#endif
1841 1937
1842#include "radeon_object.h" 1938#include "radeon_object.h"
1843 1939
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 215063e1a292..b0a5688c67f8 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -1,33 +1,118 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
1#include <linux/pci.h> 24#include <linux/pci.h>
2#include <linux/acpi.h> 25#include <linux/acpi.h>
3#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/power_supply.h>
4#include <acpi/acpi_drivers.h> 28#include <acpi/acpi_drivers.h>
5#include <acpi/acpi_bus.h> 29#include <acpi/acpi_bus.h>
30#include <acpi/video.h>
6 31
7#include <drm/drmP.h> 32#include <drm/drmP.h>
8#include <drm/drm_crtc_helper.h> 33#include <drm/drm_crtc_helper.h>
9#include "radeon.h" 34#include "radeon.h"
35#include "radeon_acpi.h"
36#include "atom.h"
10 37
11#include <linux/vga_switcheroo.h> 38#include <linux/vga_switcheroo.h>
12 39
40#define ACPI_AC_CLASS "ac_adapter"
41
42extern void radeon_pm_acpi_event_handler(struct radeon_device *rdev);
43
44struct atif_verify_interface {
45 u16 size; /* structure size in bytes (includes size field) */
46 u16 version; /* version */
47 u32 notification_mask; /* supported notifications mask */
48 u32 function_bits; /* supported functions bit vector */
49} __packed;
50
51struct atif_system_params {
52 u16 size; /* structure size in bytes (includes size field) */
53 u32 valid_mask; /* valid flags mask */
54 u32 flags; /* flags */
55 u8 command_code; /* notify command code */
56} __packed;
57
58struct atif_sbios_requests {
59 u16 size; /* structure size in bytes (includes size field) */
60 u32 pending; /* pending sbios requests */
61 u8 panel_exp_mode; /* panel expansion mode */
62 u8 thermal_gfx; /* thermal state: target gfx controller */
63 u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */
64 u8 forced_power_gfx; /* forced power state: target gfx controller */
65 u8 forced_power_state; /* forced power state: state id */
66 u8 system_power_src; /* system power source */
67 u8 backlight_level; /* panel backlight level (0-255) */
68} __packed;
69
70#define ATIF_NOTIFY_MASK 0x3
71#define ATIF_NOTIFY_NONE 0
72#define ATIF_NOTIFY_81 1
73#define ATIF_NOTIFY_N 2
74
75struct atcs_verify_interface {
76 u16 size; /* structure size in bytes (includes size field) */
77 u16 version; /* version */
78 u32 function_bits; /* supported functions bit vector */
79} __packed;
80
13/* Call the ATIF method 81/* Call the ATIF method
82 */
83/**
84 * radeon_atif_call - call an ATIF method
14 * 85 *
15 * Note: currently we discard the output 86 * @handle: acpi handle
87 * @function: the ATIF function to execute
88 * @params: ATIF function params
89 *
90 * Executes the requested ATIF function (all asics).
91 * Returns a pointer to the acpi output buffer.
16 */ 92 */
17static int radeon_atif_call(acpi_handle handle) 93static union acpi_object *radeon_atif_call(acpi_handle handle, int function,
94 struct acpi_buffer *params)
18{ 95{
19 acpi_status status; 96 acpi_status status;
20 union acpi_object atif_arg_elements[2]; 97 union acpi_object atif_arg_elements[2];
21 struct acpi_object_list atif_arg; 98 struct acpi_object_list atif_arg;
22 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; 99 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
23 100
24 atif_arg.count = 2; 101 atif_arg.count = 2;
25 atif_arg.pointer = &atif_arg_elements[0]; 102 atif_arg.pointer = &atif_arg_elements[0];
26 103
27 atif_arg_elements[0].type = ACPI_TYPE_INTEGER; 104 atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
28 atif_arg_elements[0].integer.value = 0; 105 atif_arg_elements[0].integer.value = function;
29 atif_arg_elements[1].type = ACPI_TYPE_INTEGER; 106
30 atif_arg_elements[1].integer.value = 0; 107 if (params) {
108 atif_arg_elements[1].type = ACPI_TYPE_BUFFER;
109 atif_arg_elements[1].buffer.length = params->length;
110 atif_arg_elements[1].buffer.pointer = params->pointer;
111 } else {
112 /* We need a second fake parameter */
113 atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
114 atif_arg_elements[1].integer.value = 0;
115 }
31 116
32 status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer); 117 status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
33 118
@@ -36,17 +121,434 @@ static int radeon_atif_call(acpi_handle handle)
36 DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n", 121 DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
37 acpi_format_exception(status)); 122 acpi_format_exception(status));
38 kfree(buffer.pointer); 123 kfree(buffer.pointer);
39 return 1; 124 return NULL;
40 } 125 }
41 126
42 kfree(buffer.pointer); 127 return buffer.pointer;
43 return 0; 128}
129
130/**
131 * radeon_atif_parse_notification - parse supported notifications
132 *
133 * @n: supported notifications struct
134 * @mask: supported notifications mask from ATIF
135 *
136 * Use the supported notifications mask from ATIF function
137 * ATIF_FUNCTION_VERIFY_INTERFACE to determine what notifications
138 * are supported (all asics).
139 */
140static void radeon_atif_parse_notification(struct radeon_atif_notifications *n, u32 mask)
141{
142 n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
143 n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
144 n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
145 n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
146 n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
147 n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
148 n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
149 n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
150 n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
151}
152
153/**
154 * radeon_atif_parse_functions - parse supported functions
155 *
156 * @f: supported functions struct
157 * @mask: supported functions mask from ATIF
158 *
159 * Use the supported functions mask from ATIF function
160 * ATIF_FUNCTION_VERIFY_INTERFACE to determine what functions
161 * are supported (all asics).
162 */
163static void radeon_atif_parse_functions(struct radeon_atif_functions *f, u32 mask)
164{
165 f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
166 f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
167 f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
168 f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
169 f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
170 f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
171 f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
172 f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
173 f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
174 f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
175}
176
177/**
178 * radeon_atif_verify_interface - verify ATIF
179 *
180 * @handle: acpi handle
181 * @atif: radeon atif struct
182 *
183 * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function
184 * to initialize ATIF and determine what features are supported
185 * (all asics).
186 * returns 0 on success, error on failure.
187 */
188static int radeon_atif_verify_interface(acpi_handle handle,
189 struct radeon_atif *atif)
190{
191 union acpi_object *info;
192 struct atif_verify_interface output;
193 size_t size;
194 int err = 0;
195
196 info = radeon_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
197 if (!info)
198 return -EIO;
199
200 memset(&output, 0, sizeof(output));
201
202 size = *(u16 *) info->buffer.pointer;
203 if (size < 12) {
204 DRM_INFO("ATIF buffer is too small: %lu\n", size);
205 err = -EINVAL;
206 goto out;
207 }
208 size = min(sizeof(output), size);
209
210 memcpy(&output, info->buffer.pointer, size);
211
212 /* TODO: check version? */
213 DRM_DEBUG_DRIVER("ATIF version %u\n", output.version);
214
215 radeon_atif_parse_notification(&atif->notifications, output.notification_mask);
216 radeon_atif_parse_functions(&atif->functions, output.function_bits);
217
218out:
219 kfree(info);
220 return err;
221}
222
223/**
224 * radeon_atif_get_notification_params - determine notify configuration
225 *
226 * @handle: acpi handle
227 * @n: atif notification configuration struct
228 *
229 * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
230 * to determine if a notifier is used and if so which one
231 * (all asics). This is either Notify(VGA, 0x81) or Notify(VGA, n)
232 * where n is specified in the result if a notifier is used.
233 * Returns 0 on success, error on failure.
234 */
235static int radeon_atif_get_notification_params(acpi_handle handle,
236 struct radeon_atif_notification_cfg *n)
237{
238 union acpi_object *info;
239 struct atif_system_params params;
240 size_t size;
241 int err = 0;
242
243 info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
244 if (!info) {
245 err = -EIO;
246 goto out;
247 }
248
249 size = *(u16 *) info->buffer.pointer;
250 if (size < 10) {
251 err = -EINVAL;
252 goto out;
253 }
254
255 memset(&params, 0, sizeof(params));
256 size = min(sizeof(params), size);
257 memcpy(&params, info->buffer.pointer, size);
258
259 DRM_DEBUG_DRIVER("SYSTEM_PARAMS: mask = %#x, flags = %#x\n",
260 params.flags, params.valid_mask);
261 params.flags = params.flags & params.valid_mask;
262
263 if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_NONE) {
264 n->enabled = false;
265 n->command_code = 0;
266 } else if ((params.flags & ATIF_NOTIFY_MASK) == ATIF_NOTIFY_81) {
267 n->enabled = true;
268 n->command_code = 0x81;
269 } else {
270 if (size < 11) {
271 err = -EINVAL;
272 goto out;
273 }
274 n->enabled = true;
275 n->command_code = params.command_code;
276 }
277
278out:
279 DRM_DEBUG_DRIVER("Notification %s, command code = %#x\n",
280 (n->enabled ? "enabled" : "disabled"),
281 n->command_code);
282 kfree(info);
283 return err;
284}
285
286/**
287 * radeon_atif_get_sbios_requests - get requested sbios event
288 *
289 * @handle: acpi handle
290 * @req: atif sbios request struct
291 *
292 * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
293 * to determine what requests the sbios is making to the driver
294 * (all asics).
295 * Returns 0 on success, error on failure.
296 */
297static int radeon_atif_get_sbios_requests(acpi_handle handle,
298 struct atif_sbios_requests *req)
299{
300 union acpi_object *info;
301 size_t size;
302 int count = 0;
303
304 info = radeon_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
305 if (!info)
306 return -EIO;
307
308 size = *(u16 *)info->buffer.pointer;
309 if (size < 0xd) {
310 count = -EINVAL;
311 goto out;
312 }
313 memset(req, 0, sizeof(*req));
314
315 size = min(sizeof(*req), size);
316 memcpy(req, info->buffer.pointer, size);
317 DRM_DEBUG_DRIVER("SBIOS pending requests: %#x\n", req->pending);
318
319 count = hweight32(req->pending);
320
321out:
322 kfree(info);
323 return count;
324}
325
326/**
327 * radeon_atif_handler - handle ATIF notify requests
328 *
329 * @rdev: radeon_device pointer
330 * @event: atif sbios request struct
331 *
332 * Checks the acpi event and if it matches an atif event,
333 * handles it.
334 * Returns NOTIFY code
335 */
336int radeon_atif_handler(struct radeon_device *rdev,
337 struct acpi_bus_event *event)
338{
339 struct radeon_atif *atif = &rdev->atif;
340 struct atif_sbios_requests req;
341 acpi_handle handle;
342 int count;
343
344 DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
345 event->device_class, event->type);
346
347 if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
348 return NOTIFY_DONE;
349
350 if (!atif->notification_cfg.enabled ||
351 event->type != atif->notification_cfg.command_code)
352 /* Not our event */
353 return NOTIFY_DONE;
354
355 /* Check pending SBIOS requests */
356 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
357 count = radeon_atif_get_sbios_requests(handle, &req);
358
359 if (count <= 0)
360 return NOTIFY_DONE;
361
362 DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
363
364 if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
365 struct radeon_encoder *enc = atif->encoder_for_bl;
366
367 if (enc) {
368 DRM_DEBUG_DRIVER("Changing brightness to %d\n",
369 req.backlight_level);
370
371 radeon_set_backlight_level(rdev, enc, req.backlight_level);
372
373 if (rdev->is_atom_bios) {
374 struct radeon_encoder_atom_dig *dig = enc->enc_priv;
375 backlight_force_update(dig->bl_dev,
376 BACKLIGHT_UPDATE_HOTKEY);
377 } else {
378 struct radeon_encoder_lvds *dig = enc->enc_priv;
379 backlight_force_update(dig->bl_dev,
380 BACKLIGHT_UPDATE_HOTKEY);
381 }
382 }
383 }
384 /* TODO: check other events */
385
386 /* We've handled the event, stop the notifier chain. The ACPI interface
387 * overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
388 * userspace if the event was generated only to signal a SBIOS
389 * request.
390 */
391 return NOTIFY_BAD;
392}
393
394/* Call the ATCS method
395 */
396/**
397 * radeon_atcs_call - call an ATCS method
398 *
399 * @handle: acpi handle
400 * @function: the ATCS function to execute
401 * @params: ATCS function params
402 *
403 * Executes the requested ATCS function (all asics).
404 * Returns a pointer to the acpi output buffer.
405 */
406static union acpi_object *radeon_atcs_call(acpi_handle handle, int function,
407 struct acpi_buffer *params)
408{
409 acpi_status status;
410 union acpi_object atcs_arg_elements[2];
411 struct acpi_object_list atcs_arg;
412 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
413
414 atcs_arg.count = 2;
415 atcs_arg.pointer = &atcs_arg_elements[0];
416
417 atcs_arg_elements[0].type = ACPI_TYPE_INTEGER;
418 atcs_arg_elements[0].integer.value = function;
419
420 if (params) {
421 atcs_arg_elements[1].type = ACPI_TYPE_BUFFER;
422 atcs_arg_elements[1].buffer.length = params->length;
423 atcs_arg_elements[1].buffer.pointer = params->pointer;
424 } else {
425 /* We need a second fake parameter */
426 atcs_arg_elements[1].type = ACPI_TYPE_INTEGER;
427 atcs_arg_elements[1].integer.value = 0;
428 }
429
430 status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
431
432 /* Fail only if calling the method fails and ATIF is supported */
433 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
434 DRM_DEBUG_DRIVER("failed to evaluate ATCS got %s\n",
435 acpi_format_exception(status));
436 kfree(buffer.pointer);
437 return NULL;
438 }
439
440 return buffer.pointer;
441}
442
443/**
444 * radeon_atcs_parse_functions - parse supported functions
445 *
446 * @f: supported functions struct
447 * @mask: supported functions mask from ATCS
448 *
449 * Use the supported functions mask from ATCS function
450 * ATCS_FUNCTION_VERIFY_INTERFACE to determine what functions
451 * are supported (all asics).
452 */
453static void radeon_atcs_parse_functions(struct radeon_atcs_functions *f, u32 mask)
454{
455 f->get_ext_state = mask & ATCS_GET_EXTERNAL_STATE_SUPPORTED;
456 f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
457 f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
458 f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
459}
460
461/**
462 * radeon_atcs_verify_interface - verify ATCS
463 *
464 * @handle: acpi handle
465 * @atcs: radeon atcs struct
466 *
467 * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
468 * to initialize ATCS and determine what features are supported
469 * (all asics).
470 * returns 0 on success, error on failure.
471 */
472static int radeon_atcs_verify_interface(acpi_handle handle,
473 struct radeon_atcs *atcs)
474{
475 union acpi_object *info;
476 struct atcs_verify_interface output;
477 size_t size;
478 int err = 0;
479
480 info = radeon_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
481 if (!info)
482 return -EIO;
483
484 memset(&output, 0, sizeof(output));
485
486 size = *(u16 *) info->buffer.pointer;
487 if (size < 8) {
488 DRM_INFO("ATCS buffer is too small: %lu\n", size);
489 err = -EINVAL;
490 goto out;
491 }
492 size = min(sizeof(output), size);
493
494 memcpy(&output, info->buffer.pointer, size);
495
496 /* TODO: check version? */
497 DRM_DEBUG_DRIVER("ATCS version %u\n", output.version);
498
499 radeon_atcs_parse_functions(&atcs->functions, output.function_bits);
500
501out:
502 kfree(info);
503 return err;
504}
505
506/**
507 * radeon_acpi_event - handle notify events
508 *
509 * @nb: notifier block
510 * @val: notifier event value (unused)
511 * @data: acpi event
512 *
513 * Calls relevant radeon functions in response to various
514 * acpi events.
515 * Returns NOTIFY code
516 */
517static int radeon_acpi_event(struct notifier_block *nb,
518 unsigned long val,
519 void *data)
520{
521 struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
522 struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
523
524 if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
525 if (power_supply_is_system_supplied() > 0)
526 DRM_DEBUG_DRIVER("pm: AC\n");
527 else
528 DRM_DEBUG_DRIVER("pm: DC\n");
529
530 radeon_pm_acpi_event_handler(rdev);
531 }
532
533 /* Check for pending SBIOS requests */
534 return radeon_atif_handler(rdev, entry);
44} 535}
45 536
46/* Call all ACPI methods here */ 537/* Call all ACPI methods here */
538/**
539 * radeon_acpi_init - init driver acpi support
540 *
541 * @rdev: radeon_device pointer
542 *
543 * Verifies the AMD ACPI interfaces and registers with the acpi
544 * notifier chain (all asics).
545 * Returns 0 on success, error on failure.
546 */
47int radeon_acpi_init(struct radeon_device *rdev) 547int radeon_acpi_init(struct radeon_device *rdev)
48{ 548{
49 acpi_handle handle; 549 acpi_handle handle;
550 struct radeon_atif *atif = &rdev->atif;
551 struct radeon_atcs *atcs = &rdev->atcs;
50 int ret; 552 int ret;
51 553
52 /* Get the device handle */ 554 /* Get the device handle */
@@ -56,11 +558,90 @@ int radeon_acpi_init(struct radeon_device *rdev)
56 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle) 558 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
57 return 0; 559 return 0;
58 560
561 /* Call the ATCS method */
562 ret = radeon_atcs_verify_interface(handle, atcs);
563 if (ret) {
564 DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
565 }
566
59 /* Call the ATIF method */ 567 /* Call the ATIF method */
60	ret = radeon_atif_call(handle);
61	if (ret)
62		return ret;
568	ret = radeon_atif_verify_interface(handle, atif);
569	if (ret) {
570		DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
571 goto out;
572 }
573
574 if (atif->notifications.brightness_change) {
575 struct drm_encoder *tmp;
576 struct radeon_encoder *target = NULL;
577
578 /* Find the encoder controlling the brightness */
579 list_for_each_entry(tmp, &rdev->ddev->mode_config.encoder_list,
580 head) {
581 struct radeon_encoder *enc = to_radeon_encoder(tmp);
582
583 if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
584 enc->enc_priv) {
585 if (rdev->is_atom_bios) {
586 struct radeon_encoder_atom_dig *dig = enc->enc_priv;
587 if (dig->bl_dev) {
588 target = enc;
589 break;
590 }
591 } else {
592 struct radeon_encoder_lvds *dig = enc->enc_priv;
593 if (dig->bl_dev) {
594 target = enc;
595 break;
596 }
597 }
598 }
599 }
600
601 atif->encoder_for_bl = target;
602 if (!target) {
603 /* Brightness change notification is enabled, but we
604 * didn't find a backlight controller, this should
605 * never happen.
606 */
607 DRM_ERROR("Cannot find a backlight controller\n");
608 }
609 }
63 610
64	return 0;
611	if (atif->functions.sbios_requests && !atif->functions.system_params) {
612	/* XXX check this workaround, if sbios request function is
613 * present we have to see how it's configured in the system
614 * params
615 */
616 atif->functions.system_params = true;
617 }
618
619 if (atif->functions.system_params) {
620 ret = radeon_atif_get_notification_params(handle,
621 &atif->notification_cfg);
622 if (ret) {
623 DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
624 ret);
625 /* Disable notification */
626 atif->notification_cfg.enabled = false;
627 }
628 }
629
630out:
631 rdev->acpi_nb.notifier_call = radeon_acpi_event;
632 register_acpi_notifier(&rdev->acpi_nb);
633
634 return ret;
65} 635}
66 636
637/**
638 * radeon_acpi_fini - tear down driver acpi support
639 *
640 * @rdev: radeon_device pointer
641 *
642 * Unregisters with the acpi notifier chain (all asics).
643 */
644void radeon_acpi_fini(struct radeon_device *rdev)
645{
646 unregister_acpi_notifier(&rdev->acpi_nb);
647}
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.h b/drivers/gpu/drm/radeon/radeon_acpi.h
new file mode 100644
index 000000000000..be4af76f213d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_acpi.h
@@ -0,0 +1,445 @@
1/*
2 * Copyright 2012 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef RADEON_ACPI_H
25#define RADEON_ACPI_H
26
27struct radeon_device;
28struct acpi_bus_event;
29
30int radeon_atif_handler(struct radeon_device *rdev,
31 struct acpi_bus_event *event);
32
33/* AMD hw uses four ACPI control methods:
34 * 1. ATIF
35 * ARG0: (ACPI_INTEGER) function code
36 * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
37 * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
38 * ATIF provides an entry point for the gfx driver to interact with the sbios.
39 * The AMD ACPI notification mechanism uses Notify (VGA, 0x81) or a custom
40 * notification. Which notification is used is indicated by the ATIF Control
41 * Method GET_SYSTEM_PARAMETERS. When the driver receives Notify (VGA, 0x81) or
42 * a custom notification, it invokes ATIF Control Method GET_SYSTEM_BIOS_REQUESTS
43 * to identify pending System BIOS requests and associated parameters. For
44 * example, if one of the pending requests is DISPLAY_SWITCH_REQUEST, the driver
45 * will perform display device detection and invoke ATIF Control Method
46 * SELECT_ACTIVE_DISPLAYS.
47 *
48 * 2. ATPX
49 * ARG0: (ACPI_INTEGER) function code
50 * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
51 * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
52 * ATPX methods are used on PowerXpress systems to handle mux switching and
53 * discrete GPU power control.
54 *
55 * 3. ATRM
56 * ARG0: (ACPI_INTEGER) offset of vbios rom data
57 * ARG1: (ACPI_BUFFER) size of the buffer to fill (up to 4K).
58 * OUTPUT: (ACPI_BUFFER) output buffer
59 * ATRM provides an interface to access the discrete GPU vbios image on
60 * PowerXpress systems with multiple GPUs.
61 *
62 * 4. ATCS
63 * ARG0: (ACPI_INTEGER) function code
64 * ARG1: (ACPI_BUFFER) parameter buffer, 256 bytes
65 * OUTPUT: (ACPI_BUFFER) output buffer, 256 bytes
66 * ATCS provides an interface to AMD chipset specific functionality.
67 *
68 */
69/* ATIF */
70#define ATIF_FUNCTION_VERIFY_INTERFACE 0x0
71/* ARG0: ATIF_FUNCTION_VERIFY_INTERFACE
72 * ARG1: none
73 * OUTPUT:
74 * WORD - structure size in bytes (includes size field)
75 * WORD - version
76 * DWORD - supported notifications mask
77 * DWORD - supported functions bit vector
78 */
79/* Notifications mask */
80# define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED (1 << 0)
81# define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED (1 << 1)
82# define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED (1 << 2)
83# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED (1 << 3)
84# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED (1 << 4)
85# define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED (1 << 5)
86# define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED (1 << 6)
87# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED (1 << 7)
88# define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED (1 << 8)
89/* supported functions vector */
90# define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED (1 << 0)
91# define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED (1 << 1)
92# define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED (1 << 2)
93# define ATIF_GET_LID_STATE_SUPPORTED (1 << 3)
94# define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED (1 << 4)
95# define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED (1 << 5)
96# define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED (1 << 6)
97# define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED (1 << 7)
98# define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED (1 << 12)
99# define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED (1 << 14)
100#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS 0x1
101/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
102 * ARG1: none
103 * OUTPUT:
104 * WORD - structure size in bytes (includes size field)
105 * DWORD - valid flags mask
106 * DWORD - flags
107 *
108 * OR
109 *
110 * WORD - structure size in bytes (includes size field)
111 * DWORD - valid flags mask
112 * DWORD - flags
113 * BYTE - notify command code
114 *
115 * flags
116 * bits 1:0:
117 * 0 - Notify(VGA, 0x81) is not used for notification
118 * 1 - Notify(VGA, 0x81) is used for notification
119 * 2 - Notify(VGA, n) is used for notification where
120 * n (0xd0-0xd9) is specified in notify command code.
121 * bit 2:
122 *         1 - lid changes not reported through int10
123 */
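
As a sketch of how a driver might consume GET_SYSTEM_PARAMETERS, the flags can be decoded into the notify command the sbios will use. The struct mirrors the longer output form above; layout and names are assumptions, not part of this patch:

struct atif_system_params {
	u16 size;		/* structure size in bytes (includes size field) */
	u32 valid_mask;		/* valid flags mask */
	u32 flags;		/* flags */
	u8 command_code;	/* notify command code (longer form only) */
} __packed;

static u8 atif_notify_command(const struct atif_system_params *p)
{
	switch (p->flags & 0x3) {
	case 0:
		return 0;		/* Notify(VGA, 0x81) is not used */
	case 1:
		return 0x81;		/* standard Notify(VGA, 0x81) */
	default:
		return p->command_code;	/* custom Notify(VGA, 0xd0-0xd9) */
	}
}
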
124#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS 0x2
125/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
126 * ARG1: none
127 * OUTPUT:
128 * WORD - structure size in bytes (includes size field)
129 * DWORD - pending sbios requests
130 * BYTE - panel expansion mode
131 * BYTE - thermal state: target gfx controller
132 * BYTE - thermal state: state id (0: exit state, non-0: state)
133 * BYTE - forced power state: target gfx controller
134 * BYTE - forced power state: state id
135 * BYTE - system power source
136 * BYTE - panel backlight level (0-255)
137 */
138/* pending sbios requests */
139# define ATIF_DISPLAY_SWITCH_REQUEST (1 << 0)
140# define ATIF_EXPANSION_MODE_CHANGE_REQUEST (1 << 1)
141# define ATIF_THERMAL_STATE_CHANGE_REQUEST (1 << 2)
142# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST (1 << 3)
143# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST (1 << 4)
144# define ATIF_DISPLAY_CONF_CHANGE_REQUEST (1 << 5)
145# define ATIF_PX_GFX_SWITCH_REQUEST (1 << 6)
146# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST (1 << 7)
147# define ATIF_DGPU_DISPLAY_EVENT (1 << 8)
148/* panel expansion mode */
149# define ATIF_PANEL_EXPANSION_DISABLE 0
150# define ATIF_PANEL_EXPANSION_FULL 1
151# define ATIF_PANEL_EXPANSION_ASPECT 2
152/* target gfx controller */
153# define ATIF_TARGET_GFX_SINGLE 0
154# define ATIF_TARGET_GFX_PX_IGPU 1
155# define ATIF_TARGET_GFX_PX_DGPU 2
156/* system power source */
157# define ATIF_POWER_SOURCE_AC 1
158# define ATIF_POWER_SOURCE_DC 2
159# define ATIF_POWER_SOURCE_RESTRICTED_AC_1 3
160# define ATIF_POWER_SOURCE_RESTRICTED_AC_2 4
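
The GET_SYSTEM_BIOS_REQUESTS output above likewise reads cleanly as a packed struct; a hypothetical sketch (names are assumptions, not part of this patch):

struct atif_sbios_requests {
	u16 size;		/* structure size in bytes (includes size field) */
	u32 pending;		/* pending sbios requests */
	u8 panel_exp_mode;	/* panel expansion mode */
	u8 thermal_gfx;		/* thermal state: target gfx controller */
	u8 thermal_state;	/* thermal state: state id */
	u8 forced_power_gfx;	/* forced power state: target gfx controller */
	u8 forced_power_state;	/* forced power state: state id */
	u8 system_power_src;	/* system power source */
	u8 backlight_level;	/* panel backlight level (0-255) */
} __packed;

A consumer would test pending against the ATIF_*_REQUEST bits above, e.g. routing backlight_level to the backlight device when ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST is set.
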
161#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS 0x3
162/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
163 * ARG1:
164 * WORD - structure size in bytes (includes size field)
165 * WORD - selected displays
166 * WORD - connected displays
167 * OUTPUT:
168 * WORD - structure size in bytes (includes size field)
169 * WORD - selected displays
170 */
171# define ATIF_LCD1 (1 << 0)
172# define ATIF_CRT1 (1 << 1)
173# define ATIF_TV (1 << 2)
174# define ATIF_DFP1 (1 << 3)
175# define ATIF_CRT2 (1 << 4)
176# define ATIF_LCD2 (1 << 5)
177# define ATIF_DFP2 (1 << 7)
178# define ATIF_CV (1 << 8)
179# define ATIF_DFP3 (1 << 9)
180# define ATIF_DFP4 (1 << 10)
181# define ATIF_DFP5 (1 << 11)
182# define ATIF_DFP6 (1 << 12)
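
For SELECT_ACTIVE_DISPLAYS, ARG1 is built from the display vector bits above; a sketch (struct name and layout are assumptions, not part of this patch):

struct atif_select_active_displays_input {
	u16 size;	/* structure size in bytes (includes size field) */
	u16 selected;	/* ATIF_* bits for the displays to activate */
	u16 connected;	/* ATIF_* bits for the displays detected */
} __packed;

/* e.g. request the internal panel plus one digital output:
 *	input.selected = ATIF_LCD1 | ATIF_DFP1;
 */
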
183#define ATIF_FUNCTION_GET_LID_STATE 0x4
184/* ARG0: ATIF_FUNCTION_GET_LID_STATE
185 * ARG1: none
186 * OUTPUT:
187 * WORD - structure size in bytes (includes size field)
188 * BYTE - lid state (0: open, 1: closed)
189 *
190 * GET_LID_STATE only works at boot and resume, for general lid
191 * status, use the kernel provided status
192 */
193#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS 0x5
194/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
195 * ARG1: none
196 * OUTPUT:
197 * WORD - structure size in bytes (includes size field)
198 * BYTE - 0
199 * BYTE - TV standard
200 */
201# define ATIF_TV_STD_NTSC 0
202# define ATIF_TV_STD_PAL 1
203# define ATIF_TV_STD_PALM 2
204# define ATIF_TV_STD_PAL60 3
205# define ATIF_TV_STD_NTSCJ 4
206# define ATIF_TV_STD_PALCN 5
207# define ATIF_TV_STD_PALN 6
208# define ATIF_TV_STD_SCART_RGB 9
209#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS 0x6
210/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
211 * ARG1:
212 * WORD - structure size in bytes (includes size field)
213 * BYTE - 0
214 * BYTE - TV standard
215 * OUTPUT: none
216 */
217#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS 0x7
218/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
219 * ARG1: none
220 * OUTPUT:
221 * WORD - structure size in bytes (includes size field)
222 * BYTE - panel expansion mode
223 */
224#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS 0x8
225/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
226 * ARG1:
227 * WORD - structure size in bytes (includes size field)
228 * BYTE - panel expansion mode
229 * OUTPUT: none
230 */
231#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION 0xD
232/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
233 * ARG1:
234 * WORD - structure size in bytes (includes size field)
235 * WORD - gfx controller id
236 * WORD  - gfx controller id
237 * BYTE  - current temperature (degrees Celsius)
237 * OUTPUT: none
238 */
239#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES 0xF
240/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
241 * ARG1: none
242 * OUTPUT:
243 * WORD - number of gfx devices
244 * WORD - device structure size in bytes (excludes device size field)
245 * DWORD - flags \
246 * WORD - bus number } repeated structure
247 * WORD - device number /
248 */
249/* flags */
250# define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE (1 << 0)
251# define ATIF_XGP_PORT (1 << 1)
252# define ATIF_VGA_ENABLED_GRAPHICS_DEVICE (1 << 2)
253# define ATIF_XGP_PORT_IN_DOCK (1 << 3)
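
GET_GRAPHICS_DEVICE_TYPES returns a count, a per-record size, and then repeated records, so a parser should take the record stride from the header rather than hard-coding it. A hypothetical sketch assuming a little-endian, byte-addressable output buffer (names are assumptions, not part of this patch):

struct atif_gfx_device {
	u32 flags;	/* ATIF_* device flags above */
	u16 bus;	/* pci bus number */
	u16 dev;	/* pci device number */
} __packed;

static void atif_walk_gfx_devices(const u8 *buf)
{
	u16 count = *(const u16 *)buf;
	u16 stride = *(const u16 *)(buf + 2);
	const u8 *rec = buf + 4;
	u16 i;

	for (i = 0; i < count; i++, rec += stride) {
		const struct atif_gfx_device *d = (const void *)rec;

		pr_info("gfx device %u: flags 0x%08x at %02x:%02x\n",
			i, d->flags, d->bus, d->dev);
	}
}
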
254
255/* ATPX */
256#define ATPX_FUNCTION_VERIFY_INTERFACE 0x0
257/* ARG0: ATPX_FUNCTION_VERIFY_INTERFACE
258 * ARG1: none
259 * OUTPUT:
260 * WORD - structure size in bytes (includes size field)
261 * WORD - version
262 * DWORD - supported functions bit vector
263 */
264/* supported functions vector */
265# define ATPX_GET_PX_PARAMETERS_SUPPORTED (1 << 0)
266# define ATPX_POWER_CONTROL_SUPPORTED (1 << 1)
267# define ATPX_DISPLAY_MUX_CONTROL_SUPPORTED (1 << 2)
268# define ATPX_I2C_MUX_CONTROL_SUPPORTED (1 << 3)
269# define ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED (1 << 4)
270# define ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED (1 << 5)
271# define ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED (1 << 7)
272# define ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED (1 << 8)
273#define ATPX_FUNCTION_GET_PX_PARAMETERS 0x1
274/* ARG0: ATPX_FUNCTION_GET_PX_PARAMETERS
275 * ARG1: none
276 * OUTPUT:
277 * WORD - structure size in bytes (includes size field)
278 * DWORD - valid flags mask
279 * DWORD - flags
280 */
281/* flags */
282# define ATPX_LVDS_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 0)
283# define ATPX_CRT1_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 1)
284# define ATPX_DVI1_I2C_AVAILABLE_TO_BOTH_GPUS (1 << 2)
285# define ATPX_CRT1_RGB_SIGNAL_MUXED (1 << 3)
286# define ATPX_TV_SIGNAL_MUXED (1 << 4)
287# define ATPX_DFP_SIGNAL_MUXED (1 << 5)
288# define ATPX_SEPARATE_MUX_FOR_I2C (1 << 6)
289# define ATPX_DYNAMIC_PX_SUPPORTED (1 << 7)
290# define ATPX_ACF_NOT_SUPPORTED (1 << 8)
291# define ATPX_FIXED_NOT_SUPPORTED (1 << 9)
292# define ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED (1 << 10)
293# define ATPX_DGPU_REQ_POWER_FOR_DISPLAYS (1 << 11)
294#define ATPX_FUNCTION_POWER_CONTROL 0x2
295/* ARG0: ATPX_FUNCTION_POWER_CONTROL
296 * ARG1:
297 * WORD - structure size in bytes (includes size field)
298 * BYTE - dGPU power state (0: power off, 1: power on)
299 * OUTPUT: none
300 */
301#define ATPX_FUNCTION_DISPLAY_MUX_CONTROL 0x3
302/* ARG0: ATPX_FUNCTION_DISPLAY_MUX_CONTROL
303 * ARG1:
304 * WORD - structure size in bytes (includes size field)
305 * WORD - display mux control (0: iGPU, 1: dGPU)
306 * OUTPUT: none
307 */
308# define ATPX_INTEGRATED_GPU 0
309# define ATPX_DISCRETE_GPU 1
310#define ATPX_FUNCTION_I2C_MUX_CONTROL 0x4
311/* ARG0: ATPX_FUNCTION_I2C_MUX_CONTROL
312 * ARG1:
313 * WORD - structure size in bytes (includes size field)
314 * WORD - i2c/aux/hpd mux control (0: iGPU, 1: dGPU)
315 * OUTPUT: none
316 */
317#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION 0x5
318/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION
319 * ARG1:
320 * WORD - structure size in bytes (includes size field)
321 * WORD - target gpu (0: iGPU, 1: dGPU)
322 * OUTPUT: none
323 */
324#define ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION 0x6
325/* ARG0: ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION
326 * ARG1:
327 * WORD - structure size in bytes (includes size field)
328 * WORD - target gpu (0: iGPU, 1: dGPU)
329 * OUTPUT: none
330 */
331#define ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING 0x8
332/* ARG0: ATPX_FUNCTION_GET_DISPLAY_CONNECTORS_MAPPING
333 * ARG1: none
334 * OUTPUT:
335 * WORD - number of display connectors
336 * WORD - connector structure size in bytes (excludes connector size field)
337 * BYTE - flags \
338 * BYTE - ATIF display vector bit position } repeated
339 * BYTE - adapter id (0: iGPU, 1-n: dGPU ordered by pcie bus number) } structure
340 * WORD - connector ACPI id /
341 */
342/* flags */
343# define ATPX_DISPLAY_OUTPUT_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 0)
344# define ATPX_DISPLAY_HPD_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 1)
345# define ATPX_DISPLAY_I2C_SUPPORTED_BY_ADAPTER_ID_DEVICE (1 << 2)
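
The connector-mapping records can be mirrored the same way; a sketch of one repeated record (names are assumptions, not part of this patch):

struct atpx_connector_map {
	u8 flags;		/* ATPX_DISPLAY_* flags above */
	u8 atif_display_bit;	/* bit position in the ATIF display vector */
	u8 adapter_id;		/* 0: iGPU, 1-n: dGPU ordered by pcie bus number */
	u16 acpi_id;		/* connector ACPI id */
} __packed;
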
346#define ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS 0x9
347/* ARG0: ATPX_FUNCTION_GET_DISPLAY_DETECTION_PORTS
348 * ARG1: none
349 * OUTPUT:
350 * WORD - number of HPD/DDC ports
351 * WORD - port structure size in bytes (excludes port size field)
352 * BYTE - ATIF display vector bit position \
353 * BYTE  - hpd id                              } repeated structure
354 * BYTE - ddc id /
355 *
356 * available on A+A systems only
357 */
358/* hpd id */
359# define ATPX_HPD_NONE 0
360# define ATPX_HPD1 1
361# define ATPX_HPD2 2
362# define ATPX_HPD3 3
363# define ATPX_HPD4 4
364# define ATPX_HPD5 5
365# define ATPX_HPD6 6
366/* ddc id */
367# define ATPX_DDC_NONE 0
368# define ATPX_DDC1 1
369# define ATPX_DDC2 2
370# define ATPX_DDC3 3
371# define ATPX_DDC4 4
372# define ATPX_DDC5 5
373# define ATPX_DDC6 6
374# define ATPX_DDC7 7
375# define ATPX_DDC8 8
376
377/* ATCS */
378#define ATCS_FUNCTION_VERIFY_INTERFACE 0x0
379/* ARG0: ATCS_FUNCTION_VERIFY_INTERFACE
380 * ARG1: none
381 * OUTPUT:
382 * WORD - structure size in bytes (includes size field)
383 * WORD - version
384 * DWORD - supported functions bit vector
385 */
386/* supported functions vector */
387# define ATCS_GET_EXTERNAL_STATE_SUPPORTED (1 << 0)
388# define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED (1 << 1)
389# define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED (1 << 2)
390# define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED (1 << 3)
391#define ATCS_FUNCTION_GET_EXTERNAL_STATE 0x1
392/* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE
393 * ARG1: none
394 * OUTPUT:
395 * WORD - structure size in bytes (includes size field)
396 * DWORD - valid flags mask
397 * DWORD - flags (0: undocked, 1: docked)
398 */
399/* flags */
400# define ATCS_DOCKED (1 << 0)
401#define ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST 0x2
402/* ARG0: ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST
403 * ARG1:
404 * WORD - structure size in bytes (includes size field)
405 * WORD - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
406 * WORD - valid flags mask
407 * WORD - flags
408 * BYTE - request type
409 * BYTE - performance request
410 * OUTPUT:
411 * WORD - structure size in bytes (includes size field)
412 * BYTE - return value
413 */
414/* flags */
415# define ATCS_ADVERTISE_CAPS (1 << 0)
416# define ATCS_WAIT_FOR_COMPLETION (1 << 1)
417/* request type */
418# define ATCS_PCIE_LINK_SPEED 1
419/* performance request */
420# define ATCS_REMOVE 0
421# define ATCS_FORCE_LOW_POWER 1
422# define ATCS_PERF_LEVEL_1 2 /* PCIE Gen 1 */
423# define ATCS_PERF_LEVEL_2 3 /* PCIE Gen 2 */
424# define ATCS_PERF_LEVEL_3 4 /* PCIE Gen 3 */
425/* return value */
426# define ATCS_REQUEST_REFUSED 1
427# define ATCS_REQUEST_COMPLETE 2
428# define ATCS_REQUEST_IN_PROGRESS 3
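
The client id word used by PCIE_PERFORMANCE_REQUEST (and SET_PCIE_BUS_WIDTH below) packs the requester's PCI address; a sketch (the helper name is hypothetical, not part of this patch):

static u16 atcs_client_id(struct pci_dev *pdev)
{
	/* bits 2-0: func num, 7-3: dev num, 15-8: bus num; i.e. devfn in
	 * the low byte and the bus number in the high byte */
	return (pdev->bus->number << 8) | pdev->devfn;
}
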
429#define ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION 0x3
430/* ARG0: ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION
431 * ARG1: none
432 * OUTPUT: none
433 */
434#define ATCS_FUNCTION_SET_PCIE_BUS_WIDTH 0x4
435/* ARG0: ATCS_FUNCTION_SET_PCIE_BUS_WIDTH
436 * ARG1:
437 * WORD - structure size in bytes (includes size field)
438 * WORD - client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
439 * BYTE - number of active lanes
440 * OUTPUT:
441 * WORD - structure size in bytes (includes size field)
442 * BYTE - number of active lanes
443 */
444
445#endif
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 973417c4b014..654520b95ab7 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -198,6 +198,8 @@ static struct radeon_asic r100_asic = {
198 .bandwidth_update = &r100_bandwidth_update, 198 .bandwidth_update = &r100_bandwidth_update,
199 .get_vblank_counter = &r100_get_vblank_counter, 199 .get_vblank_counter = &r100_get_vblank_counter,
200 .wait_for_vblank = &r100_wait_for_vblank, 200 .wait_for_vblank = &r100_wait_for_vblank,
201 .set_backlight_level = &radeon_legacy_set_backlight_level,
202 .get_backlight_level = &radeon_legacy_get_backlight_level,
201 }, 203 },
202 .copy = { 204 .copy = {
203 .blit = &r100_copy_blit, 205 .blit = &r100_copy_blit,
@@ -272,6 +274,8 @@ static struct radeon_asic r200_asic = {
272 .bandwidth_update = &r100_bandwidth_update, 274 .bandwidth_update = &r100_bandwidth_update,
273 .get_vblank_counter = &r100_get_vblank_counter, 275 .get_vblank_counter = &r100_get_vblank_counter,
274 .wait_for_vblank = &r100_wait_for_vblank, 276 .wait_for_vblank = &r100_wait_for_vblank,
277 .set_backlight_level = &radeon_legacy_set_backlight_level,
278 .get_backlight_level = &radeon_legacy_get_backlight_level,
275 }, 279 },
276 .copy = { 280 .copy = {
277 .blit = &r100_copy_blit, 281 .blit = &r100_copy_blit,
@@ -346,6 +350,8 @@ static struct radeon_asic r300_asic = {
346 .bandwidth_update = &r100_bandwidth_update, 350 .bandwidth_update = &r100_bandwidth_update,
347 .get_vblank_counter = &r100_get_vblank_counter, 351 .get_vblank_counter = &r100_get_vblank_counter,
348 .wait_for_vblank = &r100_wait_for_vblank, 352 .wait_for_vblank = &r100_wait_for_vblank,
353 .set_backlight_level = &radeon_legacy_set_backlight_level,
354 .get_backlight_level = &radeon_legacy_get_backlight_level,
349 }, 355 },
350 .copy = { 356 .copy = {
351 .blit = &r100_copy_blit, 357 .blit = &r100_copy_blit,
@@ -420,6 +426,8 @@ static struct radeon_asic r300_asic_pcie = {
420 .bandwidth_update = &r100_bandwidth_update, 426 .bandwidth_update = &r100_bandwidth_update,
421 .get_vblank_counter = &r100_get_vblank_counter, 427 .get_vblank_counter = &r100_get_vblank_counter,
422 .wait_for_vblank = &r100_wait_for_vblank, 428 .wait_for_vblank = &r100_wait_for_vblank,
429 .set_backlight_level = &radeon_legacy_set_backlight_level,
430 .get_backlight_level = &radeon_legacy_get_backlight_level,
423 }, 431 },
424 .copy = { 432 .copy = {
425 .blit = &r100_copy_blit, 433 .blit = &r100_copy_blit,
@@ -494,6 +502,8 @@ static struct radeon_asic r420_asic = {
494 .bandwidth_update = &r100_bandwidth_update, 502 .bandwidth_update = &r100_bandwidth_update,
495 .get_vblank_counter = &r100_get_vblank_counter, 503 .get_vblank_counter = &r100_get_vblank_counter,
496 .wait_for_vblank = &r100_wait_for_vblank, 504 .wait_for_vblank = &r100_wait_for_vblank,
505 .set_backlight_level = &atombios_set_backlight_level,
506 .get_backlight_level = &atombios_get_backlight_level,
497 }, 507 },
498 .copy = { 508 .copy = {
499 .blit = &r100_copy_blit, 509 .blit = &r100_copy_blit,
@@ -568,6 +578,8 @@ static struct radeon_asic rs400_asic = {
568 .bandwidth_update = &r100_bandwidth_update, 578 .bandwidth_update = &r100_bandwidth_update,
569 .get_vblank_counter = &r100_get_vblank_counter, 579 .get_vblank_counter = &r100_get_vblank_counter,
570 .wait_for_vblank = &r100_wait_for_vblank, 580 .wait_for_vblank = &r100_wait_for_vblank,
581 .set_backlight_level = &radeon_legacy_set_backlight_level,
582 .get_backlight_level = &radeon_legacy_get_backlight_level,
571 }, 583 },
572 .copy = { 584 .copy = {
573 .blit = &r100_copy_blit, 585 .blit = &r100_copy_blit,
@@ -642,6 +654,8 @@ static struct radeon_asic rs600_asic = {
642 .bandwidth_update = &rs600_bandwidth_update, 654 .bandwidth_update = &rs600_bandwidth_update,
643 .get_vblank_counter = &rs600_get_vblank_counter, 655 .get_vblank_counter = &rs600_get_vblank_counter,
644 .wait_for_vblank = &avivo_wait_for_vblank, 656 .wait_for_vblank = &avivo_wait_for_vblank,
657 .set_backlight_level = &atombios_set_backlight_level,
658 .get_backlight_level = &atombios_get_backlight_level,
645 }, 659 },
646 .copy = { 660 .copy = {
647 .blit = &r100_copy_blit, 661 .blit = &r100_copy_blit,
@@ -716,6 +730,8 @@ static struct radeon_asic rs690_asic = {
716 .get_vblank_counter = &rs600_get_vblank_counter, 730 .get_vblank_counter = &rs600_get_vblank_counter,
717 .bandwidth_update = &rs690_bandwidth_update, 731 .bandwidth_update = &rs690_bandwidth_update,
718 .wait_for_vblank = &avivo_wait_for_vblank, 732 .wait_for_vblank = &avivo_wait_for_vblank,
733 .set_backlight_level = &atombios_set_backlight_level,
734 .get_backlight_level = &atombios_get_backlight_level,
719 }, 735 },
720 .copy = { 736 .copy = {
721 .blit = &r100_copy_blit, 737 .blit = &r100_copy_blit,
@@ -790,6 +806,8 @@ static struct radeon_asic rv515_asic = {
790 .get_vblank_counter = &rs600_get_vblank_counter, 806 .get_vblank_counter = &rs600_get_vblank_counter,
791 .bandwidth_update = &rv515_bandwidth_update, 807 .bandwidth_update = &rv515_bandwidth_update,
792 .wait_for_vblank = &avivo_wait_for_vblank, 808 .wait_for_vblank = &avivo_wait_for_vblank,
809 .set_backlight_level = &atombios_set_backlight_level,
810 .get_backlight_level = &atombios_get_backlight_level,
793 }, 811 },
794 .copy = { 812 .copy = {
795 .blit = &r100_copy_blit, 813 .blit = &r100_copy_blit,
@@ -864,6 +882,8 @@ static struct radeon_asic r520_asic = {
864 .bandwidth_update = &rv515_bandwidth_update, 882 .bandwidth_update = &rv515_bandwidth_update,
865 .get_vblank_counter = &rs600_get_vblank_counter, 883 .get_vblank_counter = &rs600_get_vblank_counter,
866 .wait_for_vblank = &avivo_wait_for_vblank, 884 .wait_for_vblank = &avivo_wait_for_vblank,
885 .set_backlight_level = &atombios_set_backlight_level,
886 .get_backlight_level = &atombios_get_backlight_level,
867 }, 887 },
868 .copy = { 888 .copy = {
869 .blit = &r100_copy_blit, 889 .blit = &r100_copy_blit,
@@ -937,6 +957,8 @@ static struct radeon_asic r600_asic = {
937 .bandwidth_update = &rv515_bandwidth_update, 957 .bandwidth_update = &rv515_bandwidth_update,
938 .get_vblank_counter = &rs600_get_vblank_counter, 958 .get_vblank_counter = &rs600_get_vblank_counter,
939 .wait_for_vblank = &avivo_wait_for_vblank, 959 .wait_for_vblank = &avivo_wait_for_vblank,
960 .set_backlight_level = &atombios_set_backlight_level,
961 .get_backlight_level = &atombios_get_backlight_level,
940 }, 962 },
941 .copy = { 963 .copy = {
942 .blit = &r600_copy_blit, 964 .blit = &r600_copy_blit,
@@ -1010,6 +1032,8 @@ static struct radeon_asic rs780_asic = {
1010 .bandwidth_update = &rs690_bandwidth_update, 1032 .bandwidth_update = &rs690_bandwidth_update,
1011 .get_vblank_counter = &rs600_get_vblank_counter, 1033 .get_vblank_counter = &rs600_get_vblank_counter,
1012 .wait_for_vblank = &avivo_wait_for_vblank, 1034 .wait_for_vblank = &avivo_wait_for_vblank,
1035 .set_backlight_level = &atombios_set_backlight_level,
1036 .get_backlight_level = &atombios_get_backlight_level,
1013 }, 1037 },
1014 .copy = { 1038 .copy = {
1015 .blit = &r600_copy_blit, 1039 .blit = &r600_copy_blit,
@@ -1083,6 +1107,8 @@ static struct radeon_asic rv770_asic = {
1083 .bandwidth_update = &rv515_bandwidth_update, 1107 .bandwidth_update = &rv515_bandwidth_update,
1084 .get_vblank_counter = &rs600_get_vblank_counter, 1108 .get_vblank_counter = &rs600_get_vblank_counter,
1085 .wait_for_vblank = &avivo_wait_for_vblank, 1109 .wait_for_vblank = &avivo_wait_for_vblank,
1110 .set_backlight_level = &atombios_set_backlight_level,
1111 .get_backlight_level = &atombios_get_backlight_level,
1086 }, 1112 },
1087 .copy = { 1113 .copy = {
1088 .blit = &r600_copy_blit, 1114 .blit = &r600_copy_blit,
@@ -1156,6 +1182,8 @@ static struct radeon_asic evergreen_asic = {
1156 .bandwidth_update = &evergreen_bandwidth_update, 1182 .bandwidth_update = &evergreen_bandwidth_update,
1157 .get_vblank_counter = &evergreen_get_vblank_counter, 1183 .get_vblank_counter = &evergreen_get_vblank_counter,
1158 .wait_for_vblank = &dce4_wait_for_vblank, 1184 .wait_for_vblank = &dce4_wait_for_vblank,
1185 .set_backlight_level = &atombios_set_backlight_level,
1186 .get_backlight_level = &atombios_get_backlight_level,
1159 }, 1187 },
1160 .copy = { 1188 .copy = {
1161 .blit = &r600_copy_blit, 1189 .blit = &r600_copy_blit,
@@ -1229,6 +1257,8 @@ static struct radeon_asic sumo_asic = {
1229 .bandwidth_update = &evergreen_bandwidth_update, 1257 .bandwidth_update = &evergreen_bandwidth_update,
1230 .get_vblank_counter = &evergreen_get_vblank_counter, 1258 .get_vblank_counter = &evergreen_get_vblank_counter,
1231 .wait_for_vblank = &dce4_wait_for_vblank, 1259 .wait_for_vblank = &dce4_wait_for_vblank,
1260 .set_backlight_level = &atombios_set_backlight_level,
1261 .get_backlight_level = &atombios_get_backlight_level,
1232 }, 1262 },
1233 .copy = { 1263 .copy = {
1234 .blit = &r600_copy_blit, 1264 .blit = &r600_copy_blit,
@@ -1302,6 +1332,8 @@ static struct radeon_asic btc_asic = {
1302 .bandwidth_update = &evergreen_bandwidth_update, 1332 .bandwidth_update = &evergreen_bandwidth_update,
1303 .get_vblank_counter = &evergreen_get_vblank_counter, 1333 .get_vblank_counter = &evergreen_get_vblank_counter,
1304 .wait_for_vblank = &dce4_wait_for_vblank, 1334 .wait_for_vblank = &dce4_wait_for_vblank,
1335 .set_backlight_level = &atombios_set_backlight_level,
1336 .get_backlight_level = &atombios_get_backlight_level,
1305 }, 1337 },
1306 .copy = { 1338 .copy = {
1307 .blit = &r600_copy_blit, 1339 .blit = &r600_copy_blit,
@@ -1325,7 +1357,7 @@ static struct radeon_asic btc_asic = {
1325 .misc = &evergreen_pm_misc, 1357 .misc = &evergreen_pm_misc,
1326 .prepare = &evergreen_pm_prepare, 1358 .prepare = &evergreen_pm_prepare,
1327 .finish = &evergreen_pm_finish, 1359 .finish = &evergreen_pm_finish,
1328		.init_profile = &r600_pm_init_profile,
1360		.init_profile = &btc_pm_init_profile,
1329 .get_dynpm_state = &r600_pm_get_dynpm_state, 1361 .get_dynpm_state = &r600_pm_get_dynpm_state,
1330 .get_engine_clock = &radeon_atom_get_engine_clock, 1362 .get_engine_clock = &radeon_atom_get_engine_clock,
1331 .set_engine_clock = &radeon_atom_set_engine_clock, 1363 .set_engine_clock = &radeon_atom_set_engine_clock,
@@ -1342,16 +1374,6 @@ static struct radeon_asic btc_asic = {
1342 }, 1374 },
1343}; 1375};
1344 1376
1345static const struct radeon_vm_funcs cayman_vm_funcs = {
1346 .init = &cayman_vm_init,
1347 .fini = &cayman_vm_fini,
1348 .bind = &cayman_vm_bind,
1349 .unbind = &cayman_vm_unbind,
1350 .tlb_flush = &cayman_vm_tlb_flush,
1351 .page_flags = &cayman_vm_page_flags,
1352 .set_page = &cayman_vm_set_page,
1353};
1354
1355static struct radeon_asic cayman_asic = { 1377static struct radeon_asic cayman_asic = {
1356 .init = &cayman_init, 1378 .init = &cayman_init,
1357 .fini = &cayman_fini, 1379 .fini = &cayman_fini,
@@ -1366,6 +1388,12 @@ static struct radeon_asic cayman_asic = {
1366 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1388 .tlb_flush = &cayman_pcie_gart_tlb_flush,
1367 .set_page = &rs600_gart_set_page, 1389 .set_page = &rs600_gart_set_page,
1368 }, 1390 },
1391 .vm = {
1392 .init = &cayman_vm_init,
1393 .fini = &cayman_vm_fini,
1394 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1395 .set_page = &cayman_vm_set_page,
1396 },
1369 .ring = { 1397 .ring = {
1370 [RADEON_RING_TYPE_GFX_INDEX] = { 1398 [RADEON_RING_TYPE_GFX_INDEX] = {
1371 .ib_execute = &cayman_ring_ib_execute, 1399 .ib_execute = &cayman_ring_ib_execute,
@@ -1376,6 +1404,7 @@ static struct radeon_asic cayman_asic = {
1376 .ring_test = &r600_ring_test, 1404 .ring_test = &r600_ring_test,
1377 .ib_test = &r600_ib_test, 1405 .ib_test = &r600_ib_test,
1378 .is_lockup = &evergreen_gpu_is_lockup, 1406 .is_lockup = &evergreen_gpu_is_lockup,
1407 .vm_flush = &cayman_vm_flush,
1379 }, 1408 },
1380 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1409 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1381 .ib_execute = &cayman_ring_ib_execute, 1410 .ib_execute = &cayman_ring_ib_execute,
@@ -1386,6 +1415,7 @@ static struct radeon_asic cayman_asic = {
1386 .ring_test = &r600_ring_test, 1415 .ring_test = &r600_ring_test,
1387 .ib_test = &r600_ib_test, 1416 .ib_test = &r600_ib_test,
1388 .is_lockup = &evergreen_gpu_is_lockup, 1417 .is_lockup = &evergreen_gpu_is_lockup,
1418 .vm_flush = &cayman_vm_flush,
1389 }, 1419 },
1390 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1420 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1391 .ib_execute = &cayman_ring_ib_execute, 1421 .ib_execute = &cayman_ring_ib_execute,
@@ -1396,6 +1426,7 @@ static struct radeon_asic cayman_asic = {
1396 .ring_test = &r600_ring_test, 1426 .ring_test = &r600_ring_test,
1397 .ib_test = &r600_ib_test, 1427 .ib_test = &r600_ib_test,
1398 .is_lockup = &evergreen_gpu_is_lockup, 1428 .is_lockup = &evergreen_gpu_is_lockup,
1429 .vm_flush = &cayman_vm_flush,
1399 } 1430 }
1400 }, 1431 },
1401 .irq = { 1432 .irq = {
@@ -1406,6 +1437,8 @@ static struct radeon_asic cayman_asic = {
1406 .bandwidth_update = &evergreen_bandwidth_update, 1437 .bandwidth_update = &evergreen_bandwidth_update,
1407 .get_vblank_counter = &evergreen_get_vblank_counter, 1438 .get_vblank_counter = &evergreen_get_vblank_counter,
1408 .wait_for_vblank = &dce4_wait_for_vblank, 1439 .wait_for_vblank = &dce4_wait_for_vblank,
1440 .set_backlight_level = &atombios_set_backlight_level,
1441 .get_backlight_level = &atombios_get_backlight_level,
1409 }, 1442 },
1410 .copy = { 1443 .copy = {
1411 .blit = &r600_copy_blit, 1444 .blit = &r600_copy_blit,
@@ -1429,7 +1462,7 @@ static struct radeon_asic cayman_asic = {
1429 .misc = &evergreen_pm_misc, 1462 .misc = &evergreen_pm_misc,
1430 .prepare = &evergreen_pm_prepare, 1463 .prepare = &evergreen_pm_prepare,
1431 .finish = &evergreen_pm_finish, 1464 .finish = &evergreen_pm_finish,
1432		.init_profile = &r600_pm_init_profile,
1465		.init_profile = &btc_pm_init_profile,
1433 .get_dynpm_state = &r600_pm_get_dynpm_state, 1466 .get_dynpm_state = &r600_pm_get_dynpm_state,
1434 .get_engine_clock = &radeon_atom_get_engine_clock, 1467 .get_engine_clock = &radeon_atom_get_engine_clock,
1435 .set_engine_clock = &radeon_atom_set_engine_clock, 1468 .set_engine_clock = &radeon_atom_set_engine_clock,
@@ -1460,6 +1493,12 @@ static struct radeon_asic trinity_asic = {
1460 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1493 .tlb_flush = &cayman_pcie_gart_tlb_flush,
1461 .set_page = &rs600_gart_set_page, 1494 .set_page = &rs600_gart_set_page,
1462 }, 1495 },
1496 .vm = {
1497 .init = &cayman_vm_init,
1498 .fini = &cayman_vm_fini,
1499 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1500 .set_page = &cayman_vm_set_page,
1501 },
1463 .ring = { 1502 .ring = {
1464 [RADEON_RING_TYPE_GFX_INDEX] = { 1503 [RADEON_RING_TYPE_GFX_INDEX] = {
1465 .ib_execute = &cayman_ring_ib_execute, 1504 .ib_execute = &cayman_ring_ib_execute,
@@ -1470,6 +1509,7 @@ static struct radeon_asic trinity_asic = {
1470 .ring_test = &r600_ring_test, 1509 .ring_test = &r600_ring_test,
1471 .ib_test = &r600_ib_test, 1510 .ib_test = &r600_ib_test,
1472 .is_lockup = &evergreen_gpu_is_lockup, 1511 .is_lockup = &evergreen_gpu_is_lockup,
1512 .vm_flush = &cayman_vm_flush,
1473 }, 1513 },
1474 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1514 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1475 .ib_execute = &cayman_ring_ib_execute, 1515 .ib_execute = &cayman_ring_ib_execute,
@@ -1480,6 +1520,7 @@ static struct radeon_asic trinity_asic = {
1480 .ring_test = &r600_ring_test, 1520 .ring_test = &r600_ring_test,
1481 .ib_test = &r600_ib_test, 1521 .ib_test = &r600_ib_test,
1482 .is_lockup = &evergreen_gpu_is_lockup, 1522 .is_lockup = &evergreen_gpu_is_lockup,
1523 .vm_flush = &cayman_vm_flush,
1483 }, 1524 },
1484 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1525 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1485 .ib_execute = &cayman_ring_ib_execute, 1526 .ib_execute = &cayman_ring_ib_execute,
@@ -1490,6 +1531,7 @@ static struct radeon_asic trinity_asic = {
1490 .ring_test = &r600_ring_test, 1531 .ring_test = &r600_ring_test,
1491 .ib_test = &r600_ib_test, 1532 .ib_test = &r600_ib_test,
1492 .is_lockup = &evergreen_gpu_is_lockup, 1533 .is_lockup = &evergreen_gpu_is_lockup,
1534 .vm_flush = &cayman_vm_flush,
1493 } 1535 }
1494 }, 1536 },
1495 .irq = { 1537 .irq = {
@@ -1500,6 +1542,8 @@ static struct radeon_asic trinity_asic = {
1500 .bandwidth_update = &dce6_bandwidth_update, 1542 .bandwidth_update = &dce6_bandwidth_update,
1501 .get_vblank_counter = &evergreen_get_vblank_counter, 1543 .get_vblank_counter = &evergreen_get_vblank_counter,
1502 .wait_for_vblank = &dce4_wait_for_vblank, 1544 .wait_for_vblank = &dce4_wait_for_vblank,
1545 .set_backlight_level = &atombios_set_backlight_level,
1546 .get_backlight_level = &atombios_get_backlight_level,
1503 }, 1547 },
1504 .copy = { 1548 .copy = {
1505 .blit = &r600_copy_blit, 1549 .blit = &r600_copy_blit,
@@ -1540,16 +1584,6 @@ static struct radeon_asic trinity_asic = {
1540 }, 1584 },
1541}; 1585};
1542 1586
1543static const struct radeon_vm_funcs si_vm_funcs = {
1544 .init = &si_vm_init,
1545 .fini = &si_vm_fini,
1546 .bind = &si_vm_bind,
1547 .unbind = &si_vm_unbind,
1548 .tlb_flush = &si_vm_tlb_flush,
1549 .page_flags = &cayman_vm_page_flags,
1550 .set_page = &cayman_vm_set_page,
1551};
1552
1553static struct radeon_asic si_asic = { 1587static struct radeon_asic si_asic = {
1554 .init = &si_init, 1588 .init = &si_init,
1555 .fini = &si_fini, 1589 .fini = &si_fini,
@@ -1564,6 +1598,12 @@ static struct radeon_asic si_asic = {
1564 .tlb_flush = &si_pcie_gart_tlb_flush, 1598 .tlb_flush = &si_pcie_gart_tlb_flush,
1565 .set_page = &rs600_gart_set_page, 1599 .set_page = &rs600_gart_set_page,
1566 }, 1600 },
1601 .vm = {
1602 .init = &si_vm_init,
1603 .fini = &si_vm_fini,
1604 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1605 .set_page = &si_vm_set_page,
1606 },
1567 .ring = { 1607 .ring = {
1568 [RADEON_RING_TYPE_GFX_INDEX] = { 1608 [RADEON_RING_TYPE_GFX_INDEX] = {
1569 .ib_execute = &si_ring_ib_execute, 1609 .ib_execute = &si_ring_ib_execute,
@@ -1574,6 +1614,7 @@ static struct radeon_asic si_asic = {
1574 .ring_test = &r600_ring_test, 1614 .ring_test = &r600_ring_test,
1575 .ib_test = &r600_ib_test, 1615 .ib_test = &r600_ib_test,
1576 .is_lockup = &si_gpu_is_lockup, 1616 .is_lockup = &si_gpu_is_lockup,
1617 .vm_flush = &si_vm_flush,
1577 }, 1618 },
1578 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1619 [CAYMAN_RING_TYPE_CP1_INDEX] = {
1579 .ib_execute = &si_ring_ib_execute, 1620 .ib_execute = &si_ring_ib_execute,
@@ -1584,6 +1625,7 @@ static struct radeon_asic si_asic = {
1584 .ring_test = &r600_ring_test, 1625 .ring_test = &r600_ring_test,
1585 .ib_test = &r600_ib_test, 1626 .ib_test = &r600_ib_test,
1586 .is_lockup = &si_gpu_is_lockup, 1627 .is_lockup = &si_gpu_is_lockup,
1628 .vm_flush = &si_vm_flush,
1587 }, 1629 },
1588 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1630 [CAYMAN_RING_TYPE_CP2_INDEX] = {
1589 .ib_execute = &si_ring_ib_execute, 1631 .ib_execute = &si_ring_ib_execute,
@@ -1594,6 +1636,7 @@ static struct radeon_asic si_asic = {
1594 .ring_test = &r600_ring_test, 1636 .ring_test = &r600_ring_test,
1595 .ib_test = &r600_ib_test, 1637 .ib_test = &r600_ib_test,
1596 .is_lockup = &si_gpu_is_lockup, 1638 .is_lockup = &si_gpu_is_lockup,
1639 .vm_flush = &si_vm_flush,
1597 } 1640 }
1598 }, 1641 },
1599 .irq = { 1642 .irq = {
@@ -1604,6 +1647,8 @@ static struct radeon_asic si_asic = {
1604 .bandwidth_update = &dce6_bandwidth_update, 1647 .bandwidth_update = &dce6_bandwidth_update,
1605 .get_vblank_counter = &evergreen_get_vblank_counter, 1648 .get_vblank_counter = &evergreen_get_vblank_counter,
1606 .wait_for_vblank = &dce4_wait_for_vblank, 1649 .wait_for_vblank = &dce4_wait_for_vblank,
1650 .set_backlight_level = &atombios_set_backlight_level,
1651 .get_backlight_level = &atombios_get_backlight_level,
1607 }, 1652 },
1608 .copy = { 1653 .copy = {
1609 .blit = NULL, 1654 .blit = NULL,
@@ -1697,6 +1742,7 @@ int radeon_asic_init(struct radeon_device *rdev)
1697 rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock; 1742 rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
1698 rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock; 1743 rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
1699 rdev->asic->pm.set_memory_clock = NULL; 1744 rdev->asic->pm.set_memory_clock = NULL;
1745 rdev->asic->display.set_backlight_level = &radeon_legacy_set_backlight_level;
1700 } 1746 }
1701 break; 1747 break;
1702 case CHIP_RS400: 1748 case CHIP_RS400:
@@ -1769,13 +1815,11 @@ int radeon_asic_init(struct radeon_device *rdev)
1769 rdev->asic = &cayman_asic; 1815 rdev->asic = &cayman_asic;
1770 /* set num crtcs */ 1816 /* set num crtcs */
1771 rdev->num_crtc = 6; 1817 rdev->num_crtc = 6;
1772 rdev->vm_manager.funcs = &cayman_vm_funcs;
1773 break; 1818 break;
1774 case CHIP_ARUBA: 1819 case CHIP_ARUBA:
1775 rdev->asic = &trinity_asic; 1820 rdev->asic = &trinity_asic;
1776 /* set num crtcs */ 1821 /* set num crtcs */
1777 rdev->num_crtc = 4; 1822 rdev->num_crtc = 4;
1778 rdev->vm_manager.funcs = &cayman_vm_funcs;
1779 break; 1823 break;
1780 case CHIP_TAHITI: 1824 case CHIP_TAHITI:
1781 case CHIP_PITCAIRN: 1825 case CHIP_PITCAIRN:
@@ -1783,7 +1827,6 @@ int radeon_asic_init(struct radeon_device *rdev)
1783 rdev->asic = &si_asic; 1827 rdev->asic = &si_asic;
1784 /* set num crtcs */ 1828 /* set num crtcs */
1785 rdev->num_crtc = 6; 1829 rdev->num_crtc = 6;
1786 rdev->vm_manager.funcs = &si_vm_funcs;
1787 break; 1830 break;
1788 default: 1831 default:
1789 /* FIXME: not supported yet */ 1832 /* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 18c38d14c8cd..5e3a0e5c6be1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -42,6 +42,12 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev);
42void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock); 42void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
43void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); 43void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
44 44
45void atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
46u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
47void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
48u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
49
50
45/* 51/*
46 * r100,rv100,rs100,rv200,rs200 52 * r100,rv100,rs100,rv200,rs200
47 */ 53 */
@@ -389,6 +395,7 @@ void r700_cp_fini(struct radeon_device *rdev);
389struct evergreen_mc_save { 395struct evergreen_mc_save {
390 u32 vga_render_control; 396 u32 vga_render_control;
391 u32 vga_hdp_control; 397 u32 vga_hdp_control;
398 bool crtc_enabled[RADEON_MAX_CRTCS];
392}; 399};
393 400
394void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); 401void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
@@ -413,6 +420,7 @@ extern void evergreen_pm_misc(struct radeon_device *rdev);
413extern void evergreen_pm_prepare(struct radeon_device *rdev); 420extern void evergreen_pm_prepare(struct radeon_device *rdev);
414extern void evergreen_pm_finish(struct radeon_device *rdev); 421extern void evergreen_pm_finish(struct radeon_device *rdev);
415extern void sumo_pm_init_profile(struct radeon_device *rdev); 422extern void sumo_pm_init_profile(struct radeon_device *rdev);
423extern void btc_pm_init_profile(struct radeon_device *rdev);
416extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); 424extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
417extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 425extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
418extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); 426extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
@@ -435,14 +443,11 @@ int cayman_asic_reset(struct radeon_device *rdev);
435void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 443void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
436int cayman_vm_init(struct radeon_device *rdev); 444int cayman_vm_init(struct radeon_device *rdev);
437void cayman_vm_fini(struct radeon_device *rdev); 445void cayman_vm_fini(struct radeon_device *rdev);
438int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
439void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
440void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
441uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
442			      struct radeon_vm *vm,
443			      uint32_t flags);
444void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
445			unsigned pfn, uint64_t addr, uint32_t flags);
446void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
447uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
448void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
449			uint64_t addr, unsigned count,
450			uint32_t incr, uint32_t flags);
446int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 451int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
447 452
448/* DCE6 - SI */ 453/* DCE6 - SI */
@@ -465,9 +470,10 @@ int si_irq_set(struct radeon_device *rdev);
465int si_irq_process(struct radeon_device *rdev); 470int si_irq_process(struct radeon_device *rdev);
466int si_vm_init(struct radeon_device *rdev); 471int si_vm_init(struct radeon_device *rdev);
467void si_vm_fini(struct radeon_device *rdev); 472void si_vm_fini(struct radeon_device *rdev);
468int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
469void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
470void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
473void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
474		    uint64_t addr, unsigned count,
475		    uint32_t incr, uint32_t flags);
476void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
471int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 477int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
472uint64_t si_get_gpu_clock(struct radeon_device *rdev); 478uint64_t si_get_gpu_clock(struct radeon_device *rdev);
473 479
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index c4b5d0542ee2..f22eb5713528 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1254,6 +1254,10 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1254 if (rdev->clock.max_pixel_clock == 0) 1254 if (rdev->clock.max_pixel_clock == 0)
1255 rdev->clock.max_pixel_clock = 40000; 1255 rdev->clock.max_pixel_clock = 40000;
1256 1256
1257 /* not technically a clock, but... */
1258 rdev->mode_info.firmware_flags =
1259 le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
1260
1257 return true; 1261 return true;
1258 } 1262 }
1259 1263
@@ -2005,7 +2009,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2005 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 2009 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2006 2010
2007 /* add the i2c bus for thermal/fan chip */ 2011 /* add the i2c bus for thermal/fan chip */
2008	if (power_info->info.ucOverdriveThermalController > 0) {
2012	if ((power_info->info.ucOverdriveThermalController > 0) &&
2013 (power_info->info.ucOverdriveThermalController < ARRAY_SIZE(thermal_controller_names))) {
2009 DRM_INFO("Possible %s thermal controller at 0x%02x\n", 2014 DRM_INFO("Possible %s thermal controller at 0x%02x\n",
2010 thermal_controller_names[power_info->info.ucOverdriveThermalController], 2015 thermal_controller_names[power_info->info.ucOverdriveThermalController],
2011 power_info->info.ucOverdriveControllerAddress >> 1); 2016 power_info->info.ucOverdriveControllerAddress >> 1);
@@ -2209,7 +2214,7 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
2209 (controller->ucType == 2214 (controller->ucType ==
2210 ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) { 2215 ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
2211 DRM_INFO("Special thermal controller config\n"); 2216 DRM_INFO("Special thermal controller config\n");
2212	} else {
2217	} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
2213 DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", 2218 DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
2214 pp_lib_thermal_controller_names[controller->ucType], 2219 pp_lib_thermal_controller_names[controller->ucType],
2215 controller->ucI2cAddress >> 1, 2220 controller->ucI2cAddress >> 1,
@@ -2224,6 +2229,12 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
2224 strlcpy(info.type, name, sizeof(info.type)); 2229 strlcpy(info.type, name, sizeof(info.type));
2225 i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); 2230 i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
2226 } 2231 }
2232 } else {
2233 DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
2234 controller->ucType,
2235 controller->ucI2cAddress >> 1,
2236 (controller->ucFanParameters &
2237 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
2227 } 2238 }
2228 } 2239 }
2229} 2240}
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 2a2cf0b88a28..582e99449c12 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -12,30 +12,62 @@
12#include <acpi/acpi_bus.h> 12#include <acpi/acpi_bus.h>
13#include <linux/pci.h> 13#include <linux/pci.h>
14 14
15#define ATPX_VERSION 0
16#define ATPX_GPU_PWR 2
17#define ATPX_MUX_SELECT 3
18#define ATPX_I2C_MUX_SELECT 4
19#define ATPX_SWITCH_START 5
20#define ATPX_SWITCH_END 6
21
22#define ATPX_INTEGRATED 0
23#define ATPX_DISCRETE 1
15#include "radeon_acpi.h"
16
17struct radeon_atpx_functions {
18	bool px_params;
19	bool power_cntl;
20	bool disp_mux_cntl;
21	bool i2c_mux_cntl;
22	bool switch_start;
23	bool switch_end;
24 bool disp_connectors_mapping;
25	bool disp_detection_ports;
26};
24 27
25#define ATPX_MUX_IGD 0
26#define ATPX_MUX_DISCRETE 1
28struct radeon_atpx {
29	acpi_handle handle;
30 struct radeon_atpx_functions functions;
31};
27 32
28static struct radeon_atpx_priv { 33static struct radeon_atpx_priv {
29 bool atpx_detected; 34 bool atpx_detected;
30 /* handle for device - and atpx */ 35 /* handle for device - and atpx */
31 acpi_handle dhandle; 36 acpi_handle dhandle;
32	acpi_handle atpx_handle;
37	struct radeon_atpx atpx;
33} radeon_atpx_priv; 38} radeon_atpx_priv;
34 39
35static int radeon_atpx_get_version(acpi_handle handle)
40struct atpx_verify_interface {
41 u16 size; /* structure size in bytes (includes size field) */
42 u16 version; /* version */
43 u32 function_bits; /* supported functions bit vector */
44} __packed;
45
46struct atpx_power_control {
47 u16 size;
48 u8 dgpu_state;
49} __packed;
50
51struct atpx_mux {
52 u16 size;
53 u16 mux;
54} __packed;
55
56/**
57 * radeon_atpx_call - call an ATPX method
58 *
59 * @handle: acpi handle
60 * @function: the ATPX function to execute
61 * @params: ATPX function params
62 *
63 * Executes the requested ATPX function (all asics).
64 * Returns a pointer to the acpi output buffer.
65 */
66static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
67 struct acpi_buffer *params)
36{ 68{
37 acpi_status status; 69 acpi_status status;
38	union acpi_object atpx_arg_elements[2], *obj;
70	union acpi_object atpx_arg_elements[2];
39 struct acpi_object_list atpx_arg; 71 struct acpi_object_list atpx_arg;
40 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 72 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
41 73
@@ -43,99 +75,292 @@ static int radeon_atpx_get_version(acpi_handle handle)
43 atpx_arg.pointer = &atpx_arg_elements[0]; 75 atpx_arg.pointer = &atpx_arg_elements[0];
44 76
45 atpx_arg_elements[0].type = ACPI_TYPE_INTEGER; 77 atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
46	atpx_arg_elements[0].integer.value = ATPX_VERSION;
78	atpx_arg_elements[0].integer.value = function;
79
80 if (params) {
81 atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
82 atpx_arg_elements[1].buffer.length = params->length;
83 atpx_arg_elements[1].buffer.pointer = params->pointer;
84 } else {
85 /* We need a second fake parameter */
86 atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
87 atpx_arg_elements[1].integer.value = 0;
88 }
47 89
48	atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
49	atpx_arg_elements[1].integer.value = ATPX_VERSION;
90	status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer);
50 91
51	status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
52	if (ACPI_FAILURE(status)) {
53		printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
54		return -ENOSYS;
92	/* Fail only if calling the method fails and ATPX is supported */
93	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
94		printk("failed to evaluate ATPX got %s\n",
95		       acpi_format_exception(status));
96 kfree(buffer.pointer);
97 return NULL;
55 } 98 }
56	obj = (union acpi_object *)buffer.pointer;
57	if (obj && (obj->type == ACPI_TYPE_BUFFER))
58		printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2));
59	kfree(buffer.pointer);
60	return 0;
99
100	return buffer.pointer;
61} 101}
62 102
63static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value)
103/**
104 * radeon_atpx_parse_functions - parse supported functions
105 *
106 * @f: supported functions struct
107 * @mask: supported functions mask from ATPX
108 *
109 * Use the supported functions mask from ATPX function
110 * ATPX_FUNCTION_VERIFY_INTERFACE to determine what functions
111 * are supported (all asics).
112 */
113static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mask)
64{ 114{
65	acpi_status status;
66	union acpi_object atpx_arg_elements[2];
67	struct acpi_object_list atpx_arg;
68	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
69	uint8_t buf[4] = {0};
70
71	if (!handle)
72		return -EINVAL;
73
74	atpx_arg.count = 2;
75	atpx_arg.pointer = &atpx_arg_elements[0];
115	f->px_params = mask & ATPX_GET_PX_PARAMETERS_SUPPORTED;
116	f->power_cntl = mask & ATPX_POWER_CONTROL_SUPPORTED;
117	f->disp_mux_cntl = mask & ATPX_DISPLAY_MUX_CONTROL_SUPPORTED;
118	f->i2c_mux_cntl = mask & ATPX_I2C_MUX_CONTROL_SUPPORTED;
119	f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
120	f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
121	f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
122	f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
123}
76 124
77	atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
78	atpx_arg_elements[0].integer.value = cmd_id;
125/**
126 * radeon_atpx_verify_interface - verify ATPX
127 *
128 * @handle: acpi handle
129 * @atpx: radeon atpx struct
130 *
131 * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
132 * to initialize ATPX and determine what features are supported
133 * (all asics).
134 * returns 0 on success, error on failure.
135 */
136static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
137{
138 union acpi_object *info;
139 struct atpx_verify_interface output;
140 size_t size;
141 int err = 0;
142
143 info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_VERIFY_INTERFACE, NULL);
144 if (!info)
145 return -EIO;
146
147 memset(&output, 0, sizeof(output));
148
149 size = *(u16 *) info->buffer.pointer;
150 if (size < 8) {
151 printk("ATPX buffer is too small: %lu\n", size);
152 err = -EINVAL;
153 goto out;
154 }
155 size = min(sizeof(output), size);
79 156
80 buf[2] = value & 0xff; 157 memcpy(&output, info->buffer.pointer, size);
81 buf[3] = (value >> 8) & 0xff;
82 158
83 atpx_arg_elements[1].type = ACPI_TYPE_BUFFER; 159 /* TODO: check version? */
84 atpx_arg_elements[1].buffer.length = 4; 160 printk("ATPX version %u\n", output.version);
85 atpx_arg_elements[1].buffer.pointer = buf;
86 161
87 status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); 162 radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
88 if (ACPI_FAILURE(status)) {
89 printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
90 return -ENOSYS;
91 }
92 kfree(buffer.pointer);
93 163
94 return 0; 164out:
165 kfree(info);
166 return err;
95} 167}
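
The verification path reads a u16 size from the head of the returned buffer, rejects anything under 8 bytes, and copies at most sizeof(output). That implies a packed output layout like the sketch below; the authoritative struct atpx_verify_interface lives in a header that is not part of this hunk, so treat the field layout as an assumption:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Assumed packed layout implied by the parsing above. */
struct atpx_verify_interface {
	uint16_t size;		/* structure size, including this field */
	uint16_t version;	/* interface version */
	uint32_t function_bits;	/* supported functions bit vector */
} __attribute__((packed));

int main(void)
{
	/* an 8-byte reply as ATPX might return it (little endian):
	 * size = 8, version = 1, function_bits = 0x3 */
	uint8_t raw[8] = { 8, 0, 1, 0, 0x03, 0x00, 0x00, 0x00 };
	struct atpx_verify_interface out = { 0 };
	uint16_t size;

	memcpy(&size, raw, sizeof(size));
	if (size < 8)			/* same sanity check as the driver */
		return 1;
	if (size > sizeof(out))		/* min(sizeof(output), size) */
		size = sizeof(out);
	memcpy(&out, raw, size);
	printf("version %u, functions 0x%x\n", out.version,
	       (unsigned)out.function_bits);
	return 0;
}
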
96 168
97static int radeon_atpx_set_discrete_state(acpi_handle handle, int state) 169/**
170 * radeon_atpx_set_discrete_state - power up/down discrete GPU
171 *
172 * @atpx: atpx info struct
173 * @state: discrete GPU state (0 = power down, 1 = power up)
174 *
175 * Execute the ATPX_FUNCTION_POWER_CONTROL ATPX function to
176 * power down/up the discrete GPU (all asics).
177 * Returns 0 on success, error on failure.
178 */
179static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
98{ 180{
99 return radeon_atpx_execute(handle, ATPX_GPU_PWR, state); 181 struct acpi_buffer params;
182 union acpi_object *info;
183 struct atpx_power_control input;
184
185 if (atpx->functions.power_cntl) {
186 input.size = 3;
187 input.dgpu_state = state;
188 params.length = input.size;
189 params.pointer = &input;
190 info = radeon_atpx_call(atpx->handle,
191 ATPX_FUNCTION_POWER_CONTROL,
192 &params);
193 if (!info)
194 return -EIO;
195 kfree(info);
196 }
197 return 0;
100} 198}
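
Note how input.size doubles as both the ACPI buffer length and the first field of the packed input. A compilable sketch of the implied input layouts (the real definitions live in radeon_acpi.h and may differ), with static asserts tying sizeof() to the sizes the driver writes (3 for power control, 4 for the mux calls below):

#include <stdint.h>

/* Assumed packed input layouts; the real definitions live in
 * radeon_acpi.h and may differ. */
struct atpx_power_control {
	uint16_t size;		/* written as 3 by the driver */
	uint8_t dgpu_state;	/* 0 = power down dGPU, 1 = power up */
} __attribute__((packed));

struct atpx_mux {
	uint16_t size;		/* written as 4 by the driver */
	uint16_t mux;		/* 0 = integrated GPU, 1 = discrete GPU */
} __attribute__((packed));

int main(void)
{
	/* sizeof() must equal the size field the driver stores */
	_Static_assert(sizeof(struct atpx_power_control) == 3, "input.size = 3");
	_Static_assert(sizeof(struct atpx_mux) == 4, "input.size = 4");
	return 0;
}
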
101 199
102static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id) 200/**
201 * radeon_atpx_switch_disp_mux - switch display mux
202 *
203 * @atpx: atpx info struct
204 * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
205 *
206 * Execute the ATPX_FUNCTION_DISPLAY_MUX_CONTROL ATPX function to
207 * switch the display mux between the discrete GPU and integrated GPU
208 * (all asics).
209 * Returns 0 on success, error on failure.
210 */
211static int radeon_atpx_switch_disp_mux(struct radeon_atpx *atpx, u16 mux_id)
103{ 212{
104 return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id); 213 struct acpi_buffer params;
214 union acpi_object *info;
215 struct atpx_mux input;
216
217 if (atpx->functions.disp_mux_cntl) {
218 input.size = 4;
219 input.mux = mux_id;
220 params.length = input.size;
221 params.pointer = &input;
222 info = radeon_atpx_call(atpx->handle,
223 ATPX_FUNCTION_DISPLAY_MUX_CONTROL,
224 &params);
225 if (!info)
226 return -EIO;
227 kfree(info);
228 }
229 return 0;
105} 230}
106 231
107static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id) 232/**
233 * radeon_atpx_switch_i2c_mux - switch i2c/hpd mux
234 *
235 * @atpx: atpx info struct
236 * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
237 *
238 * Execute the ATPX_FUNCTION_I2C_MUX_CONTROL ATPX function to
239 * switch the i2c/hpd mux between the discrete GPU and integrated GPU
240 * (all asics).
241 * Returns 0 on success, error on failure.
242 */
243static int radeon_atpx_switch_i2c_mux(struct radeon_atpx *atpx, u16 mux_id)
108{ 244{
109 return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id); 245 struct acpi_buffer params;
246 union acpi_object *info;
247 struct atpx_mux input;
248
249 if (atpx->functions.i2c_mux_cntl) {
250 input.size = 4;
251 input.mux = mux_id;
252 params.length = input.size;
253 params.pointer = &input;
254 info = radeon_atpx_call(atpx->handle,
255 ATPX_FUNCTION_I2C_MUX_CONTROL,
256 &params);
257 if (!info)
258 return -EIO;
259 kfree(info);
260 }
261 return 0;
110} 262}
111 263
112static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id) 264/**
265 * radeon_atpx_switch_start - notify the sbios of a GPU switch
266 *
267 * @atpx: atpx info struct
268 * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
269 *
270 * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION ATPX
271 * function to notify the sbios that a switch between the discrete GPU and
272 * integrated GPU has begun (all asics).
273 * Returns 0 on success, error on failure.
274 */
275static int radeon_atpx_switch_start(struct radeon_atpx *atpx, u16 mux_id)
113{ 276{
114 return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id); 277 struct acpi_buffer params;
278 union acpi_object *info;
279 struct atpx_mux input;
280
281 if (atpx->functions.switch_start) {
282 input.size = 4;
283 input.mux = mux_id;
284 params.length = input.size;
285 params.pointer = &input;
286 info = radeon_atpx_call(atpx->handle,
287 ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION,
288 &params);
289 if (!info)
290 return -EIO;
291 kfree(info);
292 }
293 return 0;
115} 294}
116 295
117static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id) 296/**
297 * radeon_atpx_switch_end - notify the sbios of a GPU switch
298 *
299 * @atpx: atpx info struct
300 * @mux_id: mux state (0 = integrated GPU, 1 = discrete GPU)
301 *
302 * Execute the ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION ATPX
303 * function to notify the sbios that a switch between the discrete GPU and
304 * integrated GPU has ended (all asics).
305 * Returns 0 on success, error on failure.
306 */
307static int radeon_atpx_switch_end(struct radeon_atpx *atpx, u16 mux_id)
118{ 308{
119 return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id); 309 struct acpi_buffer params;
310 union acpi_object *info;
311 struct atpx_mux input;
312
313 if (atpx->functions.switch_end) {
314 input.size = 4;
315 input.mux = mux_id;
316 params.length = input.size;
317 params.pointer = &input;
318 info = radeon_atpx_call(atpx->handle,
319 ATPX_FUNCTION_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION,
320 &params);
321 if (!info)
322 return -EIO;
323 kfree(info);
324 }
325 return 0;
120} 326}
121 327
328/**
329 * radeon_atpx_switchto - switch to the requested GPU
330 *
331 * @id: GPU to switch to
332 *
333 * Execute the necessary ATPX functions to switch between the discrete GPU and
334 * integrated GPU (all asics).
335 * Returns 0 on success, error on failure.
336 */
122static int radeon_atpx_switchto(enum vga_switcheroo_client_id id) 337static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
123{ 338{
124 int gpu_id; 339 u16 gpu_id;
125 340
126 if (id == VGA_SWITCHEROO_IGD) 341 if (id == VGA_SWITCHEROO_IGD)
127 gpu_id = ATPX_INTEGRATED; 342 gpu_id = ATPX_INTEGRATED_GPU;
128 else 343 else
129 gpu_id = ATPX_DISCRETE; 344 gpu_id = ATPX_DISCRETE_GPU;
130 345
131 radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id); 346 radeon_atpx_switch_start(&radeon_atpx_priv.atpx, gpu_id);
132 radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id); 347 radeon_atpx_switch_disp_mux(&radeon_atpx_priv.atpx, gpu_id);
133 radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id); 348 radeon_atpx_switch_i2c_mux(&radeon_atpx_priv.atpx, gpu_id);
134 radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id); 349 radeon_atpx_switch_end(&radeon_atpx_priv.atpx, gpu_id);
135 350
136 return 0; 351 return 0;
137} 352}
138 353
354/**
355 * radeon_atpx_power_state - power up/down the requested GPU
356 *
357 * @id: GPU to set the power state for
358 * @state: requested power state (0 = off, 1 = on)
359 *
360 * Execute the necessary ATPX function to power down/up the discrete GPU
361 * (all asics).
362 * Returns 0 on success, error on failure.
363 */
139static int radeon_atpx_power_state(enum vga_switcheroo_client_id id, 364static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
140 enum vga_switcheroo_state state) 365 enum vga_switcheroo_state state)
141{ 366{
@@ -143,10 +368,18 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
143 if (id == VGA_SWITCHEROO_IGD) 368 if (id == VGA_SWITCHEROO_IGD)
144 return 0; 369 return 0;
145 370
146 radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state); 371 radeon_atpx_set_discrete_state(&radeon_atpx_priv.atpx, state);
147 return 0; 372 return 0;
148} 373}
149 374
375/**
376 * radeon_atpx_pci_probe_handle - look up the device and ATPX handles
377 *
378 * @pdev: pci device
379 *
380 * Look up the device ACPI handle and the ATPX handle (all asics).
381 * Returns true if the handles are found, false if not.
382 */
150static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) 383static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
151{ 384{
152 acpi_handle dhandle, atpx_handle; 385 acpi_handle dhandle, atpx_handle;
@@ -161,18 +394,30 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
161 return false; 394 return false;
162 395
163 radeon_atpx_priv.dhandle = dhandle; 396 radeon_atpx_priv.dhandle = dhandle;
164 radeon_atpx_priv.atpx_handle = atpx_handle; 397 radeon_atpx_priv.atpx.handle = atpx_handle;
165 return true; 398 return true;
166} 399}
167 400
401/**
402 * radeon_atpx_init - verify the ATPX interface
403 *
404 * Verify the ATPX interface (all asics).
405 * Returns 0 on success, error on failure.
406 */
168static int radeon_atpx_init(void) 407static int radeon_atpx_init(void)
169{ 408{
170 /* set up the ATPX handle */ 409 /* set up the ATPX handle */
171 410 return radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
172 radeon_atpx_get_version(radeon_atpx_priv.atpx_handle);
173 return 0;
174} 411}
175 412
413/**
414 * radeon_atpx_get_client_id - get the client id
415 *
416 * @pdev: pci device
417 *
418 * Look up whether we are the integrated or discrete GPU (all asics).
419 * Returns the client id.
420 */
176static int radeon_atpx_get_client_id(struct pci_dev *pdev) 421static int radeon_atpx_get_client_id(struct pci_dev *pdev)
177{ 422{
178 if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) 423 if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
@@ -188,6 +433,12 @@ static struct vga_switcheroo_handler radeon_atpx_handler = {
188 .get_client_id = radeon_atpx_get_client_id, 433 .get_client_id = radeon_atpx_get_client_id,
189}; 434};
190 435
436/**
437 * radeon_atpx_detect - detect whether we have PX
438 *
439 * Check if we have a PX system (all asics).
440 * Returns true if we have a PX system, false if not.
441 */
191static bool radeon_atpx_detect(void) 442static bool radeon_atpx_detect(void)
192{ 443{
193 char acpi_method_name[255] = { 0 }; 444 char acpi_method_name[255] = { 0 };
@@ -203,7 +454,7 @@ static bool radeon_atpx_detect(void)
203 } 454 }
204 455
205 if (has_atpx && vga_count == 2) { 456 if (has_atpx && vga_count == 2) {
206 acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer); 457 acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
207 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", 458 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
208 acpi_method_name); 459 acpi_method_name);
209 radeon_atpx_priv.atpx_detected = true; 460 radeon_atpx_priv.atpx_detected = true;
@@ -212,6 +463,11 @@ static bool radeon_atpx_detect(void)
212 return false; 463 return false;
213} 464}
214 465
466/**
467 * radeon_register_atpx_handler - register with vga_switcheroo
468 *
469 * Register the PX callbacks with vga_switcheroo (all asics).
470 */
215void radeon_register_atpx_handler(void) 471void radeon_register_atpx_handler(void)
216{ 472{
217 bool r; 473 bool r;
@@ -224,6 +480,11 @@ void radeon_register_atpx_handler(void)
224 vga_switcheroo_register_handler(&radeon_atpx_handler); 480 vga_switcheroo_register_handler(&radeon_atpx_handler);
225} 481}
226 482
483/**
484 * radeon_unregister_atpx_handler - unregister with vga_switcheroo
485 *
486 * Unregister the PX callbacks with vga_switcheroo (all asics).
487 */
227void radeon_unregister_atpx_handler(void) 488void radeon_unregister_atpx_handler(void)
228{ 489{
229 vga_switcheroo_unregister_handler(); 490 vga_switcheroo_unregister_handler();
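
For reference, the private state this refactor converges on can be sketched as below; the field names follow the accesses in the hunks above (radeon_atpx_priv.atpx.handle, .functions, .dhandle, .atpx_detected), but the real definitions are private to radeon_atpx_handler.c, so this is only an assumed shape:

#include <stdbool.h>

/* Assumed shape only; the real definitions are private to
 * radeon_atpx_handler.c. */
struct radeon_atpx_functions {
	bool px_params, power_cntl, disp_mux_cntl, i2c_mux_cntl;
	bool switch_start, switch_end;
	bool disp_connectors_mapping, disp_detetion_ports;
};

struct radeon_atpx {
	void *handle;			/* acpi_handle in the driver */
	struct radeon_atpx_functions functions;
};

static struct {
	void *dhandle;			/* ACPI handle of the PCI device */
	struct radeon_atpx atpx;
	bool atpx_detected;
} radeon_atpx_priv;

int main(void)
{
	(void)radeon_atpx_priv;
	return 0;
}
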
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index a3900e7bd77b..45b660b27cfc 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3319,15 +3319,6 @@ static void combios_write_ram_size(struct drm_device *dev)
3319 WREG32(RADEON_CONFIG_MEMSIZE, mem_size); 3319 WREG32(RADEON_CONFIG_MEMSIZE, mem_size);
3320} 3320}
3321 3321
3322void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable)
3323{
3324 uint16_t dyn_clk_info =
3325 combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
3326
3327 if (dyn_clk_info)
3328 combios_parse_pll_table(dev, dyn_clk_info);
3329}
3330
3331void radeon_combios_asic_init(struct drm_device *dev) 3322void radeon_combios_asic_init(struct drm_device *dev)
3332{ 3323{
3333 struct radeon_device *rdev = dev->dev_private; 3324 struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 3bc22e341719..67cfc1795ecd 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -40,10 +40,6 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
40 struct drm_encoder *encoder, 40 struct drm_encoder *encoder,
41 bool connected); 41 bool connected);
42 42
43extern void
44radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
45 struct drm_connector *drm_connector);
46
47void radeon_connector_hotplug(struct drm_connector *connector) 43void radeon_connector_hotplug(struct drm_connector *connector)
48{ 44{
49 struct drm_device *dev = connector->dev; 45 struct drm_device *dev = connector->dev;
@@ -198,7 +194,7 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
198 } 194 }
199} 195}
200 196
201struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type) 197static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
202{ 198{
203 struct drm_mode_object *obj; 199 struct drm_mode_object *obj;
204 struct drm_encoder *encoder; 200 struct drm_encoder *encoder;
@@ -219,7 +215,7 @@ struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int enc
219 return NULL; 215 return NULL;
220} 216}
221 217
222struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector) 218static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
223{ 219{
224 int enc_id = connector->encoder_ids[0]; 220 int enc_id = connector->encoder_ids[0];
225 struct drm_mode_object *obj; 221 struct drm_mode_object *obj;
@@ -370,7 +366,7 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
370 } 366 }
371} 367}
372 368
373int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property, 369static int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
374 uint64_t val) 370 uint64_t val)
375{ 371{
376 struct drm_device *dev = connector->dev; 372 struct drm_device *dev = connector->dev;
@@ -691,13 +687,13 @@ static int radeon_lvds_set_property(struct drm_connector *connector,
691} 687}
692 688
693 689
694struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = { 690static const struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
695 .get_modes = radeon_lvds_get_modes, 691 .get_modes = radeon_lvds_get_modes,
696 .mode_valid = radeon_lvds_mode_valid, 692 .mode_valid = radeon_lvds_mode_valid,
697 .best_encoder = radeon_best_single_encoder, 693 .best_encoder = radeon_best_single_encoder,
698}; 694};
699 695
700struct drm_connector_funcs radeon_lvds_connector_funcs = { 696static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
701 .dpms = drm_helper_connector_dpms, 697 .dpms = drm_helper_connector_dpms,
702 .detect = radeon_lvds_detect, 698 .detect = radeon_lvds_detect,
703 .fill_modes = drm_helper_probe_single_connector_modes, 699 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -809,13 +805,13 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
809 return ret; 805 return ret;
810} 806}
811 807
812struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = { 808static const struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
813 .get_modes = radeon_vga_get_modes, 809 .get_modes = radeon_vga_get_modes,
814 .mode_valid = radeon_vga_mode_valid, 810 .mode_valid = radeon_vga_mode_valid,
815 .best_encoder = radeon_best_single_encoder, 811 .best_encoder = radeon_best_single_encoder,
816}; 812};
817 813
818struct drm_connector_funcs radeon_vga_connector_funcs = { 814static const struct drm_connector_funcs radeon_vga_connector_funcs = {
819 .dpms = drm_helper_connector_dpms, 815 .dpms = drm_helper_connector_dpms,
820 .detect = radeon_vga_detect, 816 .detect = radeon_vga_detect,
821 .fill_modes = drm_helper_probe_single_connector_modes, 817 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -879,13 +875,13 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
879 return ret; 875 return ret;
880} 876}
881 877
882struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = { 878static const struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
883 .get_modes = radeon_tv_get_modes, 879 .get_modes = radeon_tv_get_modes,
884 .mode_valid = radeon_tv_mode_valid, 880 .mode_valid = radeon_tv_mode_valid,
885 .best_encoder = radeon_best_single_encoder, 881 .best_encoder = radeon_best_single_encoder,
886}; 882};
887 883
888struct drm_connector_funcs radeon_tv_connector_funcs = { 884static const struct drm_connector_funcs radeon_tv_connector_funcs = {
889 .dpms = drm_helper_connector_dpms, 885 .dpms = drm_helper_connector_dpms,
890 .detect = radeon_tv_detect, 886 .detect = radeon_tv_detect,
891 .fill_modes = drm_helper_probe_single_connector_modes, 887 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -1089,7 +1085,7 @@ out:
1089} 1085}
1090 1086
1091/* okay need to be smart in here about which encoder to pick */ 1087/* okay need to be smart in here about which encoder to pick */
1092struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) 1088static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
1093{ 1089{
1094 int enc_id = connector->encoder_ids[0]; 1090 int enc_id = connector->encoder_ids[0];
1095 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1091 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1179,13 +1175,13 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
1179 return MODE_OK; 1175 return MODE_OK;
1180} 1176}
1181 1177
1182struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { 1178static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
1183 .get_modes = radeon_dvi_get_modes, 1179 .get_modes = radeon_dvi_get_modes,
1184 .mode_valid = radeon_dvi_mode_valid, 1180 .mode_valid = radeon_dvi_mode_valid,
1185 .best_encoder = radeon_dvi_encoder, 1181 .best_encoder = radeon_dvi_encoder,
1186}; 1182};
1187 1183
1188struct drm_connector_funcs radeon_dvi_connector_funcs = { 1184static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
1189 .dpms = drm_helper_connector_dpms, 1185 .dpms = drm_helper_connector_dpms,
1190 .detect = radeon_dvi_detect, 1186 .detect = radeon_dvi_detect,
1191 .fill_modes = drm_helper_probe_single_connector_modes, 1187 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -1462,13 +1458,13 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
1462 } 1458 }
1463} 1459}
1464 1460
1465struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { 1461static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
1466 .get_modes = radeon_dp_get_modes, 1462 .get_modes = radeon_dp_get_modes,
1467 .mode_valid = radeon_dp_mode_valid, 1463 .mode_valid = radeon_dp_mode_valid,
1468 .best_encoder = radeon_dvi_encoder, 1464 .best_encoder = radeon_dvi_encoder,
1469}; 1465};
1470 1466
1471struct drm_connector_funcs radeon_dp_connector_funcs = { 1467static const struct drm_connector_funcs radeon_dp_connector_funcs = {
1472 .dpms = drm_helper_connector_dpms, 1468 .dpms = drm_helper_connector_dpms,
1473 .detect = radeon_dp_detect, 1469 .detect = radeon_dp_detect,
1474 .fill_modes = drm_helper_probe_single_connector_modes, 1470 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -2008,15 +2004,4 @@ radeon_add_legacy_connector(struct drm_device *dev,
2008 connector->polled = DRM_CONNECTOR_POLL_HPD; 2004 connector->polled = DRM_CONNECTOR_POLL_HPD;
2009 connector->display_info.subpixel_order = subpixel_order; 2005 connector->display_info.subpixel_order = subpixel_order;
2010 drm_sysfs_connector_add(connector); 2006 drm_sysfs_connector_add(connector);
2011 if (connector_type == DRM_MODE_CONNECTOR_LVDS) {
2012 struct drm_encoder *drm_encoder;
2013
2014 list_for_each_entry(drm_encoder, &dev->mode_config.encoder_list, head) {
2015 struct radeon_encoder *radeon_encoder;
2016
2017 radeon_encoder = to_radeon_encoder(drm_encoder);
2018 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_LVDS)
2019 radeon_legacy_backlight_init(radeon_encoder, connector);
2020 }
2021 }
2022} 2007}
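
The radeon_connectors.c hunks are a uniform mechanical change: every connector funcs/helper-funcs table becomes static const. Marking an ops table static keeps the symbol file-local, and const lets the compiler place it in read-only data. A minimal standalone example of the resulting pattern, with hypothetical names:

/* Hypothetical names; only the storage pattern matters. */
struct example_ops {
	int (*probe)(void);
};

static int example_probe(void)
{
	return 0;
}

/* file-local (static) and placed in read-only data (const) */
static const struct example_ops example_funcs = {
	.probe = example_probe,
};

int main(void)
{
	return example_funcs.probe();
}
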
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 891fff52ab65..cb7b7c062fef 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -32,7 +32,7 @@
32void r100_cs_dump_packet(struct radeon_cs_parser *p, 32void r100_cs_dump_packet(struct radeon_cs_parser *p,
33 struct radeon_cs_packet *pkt); 33 struct radeon_cs_packet *pkt);
34 34
35int radeon_cs_parser_relocs(struct radeon_cs_parser *p) 35static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
36{ 36{
37 struct drm_device *ddev = p->rdev->ddev; 37 struct drm_device *ddev = p->rdev->ddev;
38 struct radeon_cs_chunk *chunk; 38 struct radeon_cs_chunk *chunk;
@@ -115,19 +115,27 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
115 return 0; 115 return 0;
116} 116}
117 117
118static void radeon_cs_sync_to(struct radeon_cs_parser *p,
119 struct radeon_fence *fence)
120{
121 struct radeon_fence *other;
122
123 if (!fence)
124 return;
125
126 other = p->ib.sync_to[fence->ring];
127 p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
128}
129
118static void radeon_cs_sync_rings(struct radeon_cs_parser *p) 130static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
119{ 131{
120 int i; 132 int i;
121 133
122 for (i = 0; i < p->nrelocs; i++) { 134 for (i = 0; i < p->nrelocs; i++) {
123 struct radeon_fence *a, *b; 135 if (!p->relocs[i].robj)
124
125 if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
126 continue; 136 continue;
127 137
128 a = p->relocs[i].robj->tbo.sync_obj; 138 radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
129 b = p->ib.sync_to[a->ring];
130 p->ib.sync_to[a->ring] = radeon_fence_later(a, b);
131 } 139 }
132} 140}
133 141
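
radeon_cs_sync_to() keeps, per ring, only the later of the already-recorded fence and the new one, so an IB ends up waiting on at most one fence per ring. A runnable model of that reduction, using bare sequence numbers in place of struct radeon_fence (the driver's radeon_fence_later() compares real fences, so this is only an analogy):

#include <stdio.h>
#include <stdint.h>

#define NUM_RINGS 3

/* A fence reduced to (ring, seq). */
struct fence {
	int ring;
	uint64_t seq;
};

static struct fence *fence_later(struct fence *a, struct fence *b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a->seq > b->seq ? a : b;
}

static struct fence *sync_to[NUM_RINGS];

/* mirrors radeon_cs_sync_to(): keep only the later fence per ring */
static void cs_sync_to(struct fence *f)
{
	if (!f)
		return;
	sync_to[f->ring] = fence_later(f, sync_to[f->ring]);
}

int main(void)
{
	struct fence a = { 1, 10 }, b = { 1, 42 };

	cs_sync_to(&a);
	cs_sync_to(&b);
	printf("ring 1 waits for seq %llu\n",
	       (unsigned long long)sync_to[1]->seq);	/* 42 */
	return 0;
}
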
@@ -278,30 +286,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
278 return 0; 286 return 0;
279} 287}
280 288
281static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
282 struct radeon_fence *fence)
283{
284 struct radeon_fpriv *fpriv = parser->filp->driver_priv;
285 struct radeon_vm *vm = &fpriv->vm;
286 struct radeon_bo_list *lobj;
287
288 if (parser->chunk_ib_idx == -1) {
289 return;
290 }
291 if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
292 return;
293 }
294
295 list_for_each_entry(lobj, &parser->validated, tv.head) {
296 struct radeon_bo_va *bo_va;
297 struct radeon_bo *rbo = lobj->bo;
298
299 bo_va = radeon_bo_va(rbo, vm);
300 radeon_fence_unref(&bo_va->fence);
301 bo_va->fence = radeon_fence_ref(fence);
302 }
303}
304
305/** 289/**
306 * cs_parser_fini() - clean parser states 290 * cs_parser_fini() - clean parser states
307 * @parser: parser structure holding parsing context. 291 * @parser: parser structure holding parsing context.
@@ -315,8 +299,6 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
315 unsigned i; 299 unsigned i;
316 300
317 if (!error) { 301 if (!error) {
318 /* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */
319 radeon_bo_vm_fence_va(parser, parser->ib.fence);
320 ttm_eu_fence_buffer_objects(&parser->validated, 302 ttm_eu_fence_buffer_objects(&parser->validated,
321 parser->ib.fence); 303 parser->ib.fence);
322 } else { 304 } else {
@@ -363,7 +345,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
363 * uncached). 345 * uncached).
364 */ 346 */
365 r = radeon_ib_get(rdev, parser->ring, &parser->ib, 347 r = radeon_ib_get(rdev, parser->ring, &parser->ib,
366 ib_chunk->length_dw * 4); 348 NULL, ib_chunk->length_dw * 4);
367 if (r) { 349 if (r) {
368 DRM_ERROR("Failed to get ib !\n"); 350 DRM_ERROR("Failed to get ib !\n");
369 return r; 351 return r;
@@ -380,7 +362,6 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
380 return r; 362 return r;
381 } 363 }
382 radeon_cs_sync_rings(parser); 364 radeon_cs_sync_rings(parser);
383 parser->ib.vm_id = 0;
384 r = radeon_ib_schedule(rdev, &parser->ib, NULL); 365 r = radeon_ib_schedule(rdev, &parser->ib, NULL);
385 if (r) { 366 if (r) {
386 DRM_ERROR("Failed to schedule IB !\n"); 367 DRM_ERROR("Failed to schedule IB !\n");
@@ -391,10 +372,15 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
391static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser, 372static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
392 struct radeon_vm *vm) 373 struct radeon_vm *vm)
393{ 374{
375 struct radeon_device *rdev = parser->rdev;
394 struct radeon_bo_list *lobj; 376 struct radeon_bo_list *lobj;
395 struct radeon_bo *bo; 377 struct radeon_bo *bo;
396 int r; 378 int r;
397 379
380 r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
381 if (r) {
382 return r;
383 }
398 list_for_each_entry(lobj, &parser->validated, tv.head) { 384 list_for_each_entry(lobj, &parser->validated, tv.head) {
399 bo = lobj->bo; 385 bo = lobj->bo;
400 r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem); 386 r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
@@ -426,7 +412,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
426 return -EINVAL; 412 return -EINVAL;
427 } 413 }
428 r = radeon_ib_get(rdev, parser->ring, &parser->const_ib, 414 r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
429 ib_chunk->length_dw * 4); 415 vm, ib_chunk->length_dw * 4);
430 if (r) { 416 if (r) {
431 DRM_ERROR("Failed to get const ib !\n"); 417 DRM_ERROR("Failed to get const ib !\n");
432 return r; 418 return r;
@@ -450,7 +436,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
450 return -EINVAL; 436 return -EINVAL;
451 } 437 }
452 r = radeon_ib_get(rdev, parser->ring, &parser->ib, 438 r = radeon_ib_get(rdev, parser->ring, &parser->ib,
453 ib_chunk->length_dw * 4); 439 vm, ib_chunk->length_dw * 4);
454 if (r) { 440 if (r) {
455 DRM_ERROR("Failed to get ib !\n"); 441 DRM_ERROR("Failed to get ib !\n");
456 return r; 442 return r;
@@ -468,7 +454,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
468 454
469 mutex_lock(&rdev->vm_manager.lock); 455 mutex_lock(&rdev->vm_manager.lock);
470 mutex_lock(&vm->mutex); 456 mutex_lock(&vm->mutex);
471 r = radeon_vm_bind(rdev, vm); 457 r = radeon_vm_alloc_pt(rdev, vm);
472 if (r) { 458 if (r) {
473 goto out; 459 goto out;
474 } 460 }
@@ -477,32 +463,21 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
477 goto out; 463 goto out;
478 } 464 }
479 radeon_cs_sync_rings(parser); 465 radeon_cs_sync_rings(parser);
480 466 radeon_cs_sync_to(parser, vm->fence);
481 parser->ib.vm_id = vm->id; 467 radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));
482 /* ib pool is bind at 0 in virtual address space,
483 * so gpu_addr is the offset inside the pool bo
484 */
485 parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
486 468
487 if ((rdev->family >= CHIP_TAHITI) && 469 if ((rdev->family >= CHIP_TAHITI) &&
488 (parser->chunk_const_ib_idx != -1)) { 470 (parser->chunk_const_ib_idx != -1)) {
489 parser->const_ib.vm_id = vm->id;
490 /* ib pool is bind at 0 in virtual address space,
491 * so gpu_addr is the offset inside the pool bo
492 */
493 parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
494 r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib); 471 r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
495 } else { 472 } else {
496 r = radeon_ib_schedule(rdev, &parser->ib, NULL); 473 r = radeon_ib_schedule(rdev, &parser->ib, NULL);
497 } 474 }
498 475
499out:
500 if (!r) { 476 if (!r) {
501 if (vm->fence) { 477 radeon_vm_fence(rdev, vm, parser->ib.fence);
502 radeon_fence_unref(&vm->fence);
503 }
504 vm->fence = radeon_fence_ref(parser->ib.fence);
505 } 478 }
479
480out:
506 mutex_unlock(&vm->mutex); 481 mutex_unlock(&vm->mutex);
507 mutex_unlock(&rdev->vm_manager.lock); 482 mutex_unlock(&rdev->vm_manager.lock);
508 return r; 483 return r;
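
Across these radeon_cs.c hunks the per-VM fence bookkeeping becomes: sync the new IB to vm->fence (and to whatever radeon_vm_grab_id() returns), then let radeon_vm_fence() install the IB's fence as the VM's new protecting fence. A toy model of that sync-then-replace lifecycle, with plain ints standing in for fences:

#include <stdio.h>

static int vm_fence;	/* 0 = no fence yet; ints stand in for fences */

static void submit_ib(int new_fence)
{
	if (vm_fence)		/* radeon_cs_sync_to(parser, vm->fence) */
		printf("IB %d syncs to fence %d\n", new_fence, vm_fence);
	vm_fence = new_fence;	/* radeon_vm_fence() installs the new fence */
}

int main(void)
{
	submit_ib(1);	/* nothing to sync to */
	submit_ib(2);	/* syncs to 1 */
	submit_ib(3);	/* syncs to 2 */
	return 0;
}
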
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7a3daebd732d..64a42647f08a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -842,7 +842,7 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
842 * Validates certain module parameters and updates 842 * Validates certain module parameters and updates
843 * the associated values used by the driver (all asics). 843 * the associated values used by the driver (all asics).
844 */ 844 */
845void radeon_check_arguments(struct radeon_device *rdev) 845static void radeon_check_arguments(struct radeon_device *rdev)
846{ 846{
847 /* vramlimit must be a power of two */ 847 /* vramlimit must be a power of two */
848 switch (radeon_vram_limit) { 848 switch (radeon_vram_limit) {
@@ -1013,13 +1013,11 @@ int radeon_device_init(struct radeon_device *rdev,
1013 init_rwsem(&rdev->pm.mclk_lock); 1013 init_rwsem(&rdev->pm.mclk_lock);
1014 init_rwsem(&rdev->exclusive_lock); 1014 init_rwsem(&rdev->exclusive_lock);
1015 init_waitqueue_head(&rdev->irq.vblank_queue); 1015 init_waitqueue_head(&rdev->irq.vblank_queue);
1016 init_waitqueue_head(&rdev->irq.idle_queue);
1017 r = radeon_gem_init(rdev); 1016 r = radeon_gem_init(rdev);
1018 if (r) 1017 if (r)
1019 return r; 1018 return r;
1020 /* initialize vm here */ 1019 /* initialize vm here */
1021 mutex_init(&rdev->vm_manager.lock); 1020 mutex_init(&rdev->vm_manager.lock);
1022 rdev->vm_manager.use_bitmap = 1;
1023 rdev->vm_manager.max_pfn = 1 << 20; 1021 rdev->vm_manager.max_pfn = 1 << 20;
1024 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm); 1022 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
1025 1023
@@ -1284,6 +1282,13 @@ int radeon_resume_kms(struct drm_device *dev)
1284 if (rdev->is_atom_bios) { 1282 if (rdev->is_atom_bios) {
1285 radeon_atom_encoder_init(rdev); 1283 radeon_atom_encoder_init(rdev);
1286 radeon_atom_disp_eng_pll_init(rdev); 1284 radeon_atom_disp_eng_pll_init(rdev);
1285 /* turn on the backlight */
1286 if (rdev->mode_info.bl_encoder) {
1287 u8 bl_level = radeon_get_backlight_level(rdev,
1288 rdev->mode_info.bl_encoder);
1289 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1290 bl_level);
1291 }
1287 } 1292 }
1288 /* reset hpd state */ 1293 /* reset hpd state */
1289 radeon_hpd_init(rdev); 1294 radeon_hpd_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 97f3fe7dd040..07eb84e8a8a4 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -63,9 +63,11 @@
63 * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query 63 * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
64 * 2.21.0 - r600-r700: FMASK and CMASK 64 * 2.21.0 - r600-r700: FMASK and CMASK
65 * 2.22.0 - r600 only: RESOLVE_BOX allowed 65 * 2.22.0 - r600 only: RESOLVE_BOX allowed
66 * 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
67 * 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
66 */ 68 */
67#define KMS_DRIVER_MAJOR 2 69#define KMS_DRIVER_MAJOR 2
68#define KMS_DRIVER_MINOR 22 70#define KMS_DRIVER_MINOR 24
69#define KMS_DRIVER_PATCHLEVEL 0 71#define KMS_DRIVER_PATCHLEVEL 0
70int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 72int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
71int radeon_driver_unload_kms(struct drm_device *dev); 73int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 5a1bae3a2426..bd4959ca23aa 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -29,6 +29,14 @@
29#include "radeon.h" 29#include "radeon.h"
30#include "atom.h" 30#include "atom.h"
31 31
32extern void
33radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
34 struct drm_connector *drm_connector);
35extern void
36radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
37 struct drm_connector *drm_connector);
38
39
32static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) 40static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
33{ 41{
34 struct drm_device *dev = encoder->dev; 42 struct drm_device *dev = encoder->dev;
@@ -153,6 +161,7 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
153void 161void
154radeon_link_encoder_connector(struct drm_device *dev) 162radeon_link_encoder_connector(struct drm_device *dev)
155{ 163{
164 struct radeon_device *rdev = dev->dev_private;
156 struct drm_connector *connector; 165 struct drm_connector *connector;
157 struct radeon_connector *radeon_connector; 166 struct radeon_connector *radeon_connector;
158 struct drm_encoder *encoder; 167 struct drm_encoder *encoder;
@@ -163,8 +172,16 @@ radeon_link_encoder_connector(struct drm_device *dev)
163 radeon_connector = to_radeon_connector(connector); 172 radeon_connector = to_radeon_connector(connector);
164 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 173 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
165 radeon_encoder = to_radeon_encoder(encoder); 174 radeon_encoder = to_radeon_encoder(encoder);
166 if (radeon_encoder->devices & radeon_connector->devices) 175 if (radeon_encoder->devices & radeon_connector->devices) {
167 drm_mode_connector_attach_encoder(connector, encoder); 176 drm_mode_connector_attach_encoder(connector, encoder);
177 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
178 if (rdev->is_atom_bios)
179 radeon_atom_backlight_init(radeon_encoder, connector);
180 else
181 radeon_legacy_backlight_init(radeon_encoder, connector);
182 rdev->mode_info.bl_encoder = radeon_encoder;
183 }
184 }
168 } 185 }
169 } 186 }
170} 187}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fae493710ebf..cc8489d8c6d1 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -315,22 +315,6 @@ static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
315 return new_fb; 315 return new_fb;
316} 316}
317 317
318static char *mode_option;
319int radeon_parse_options(char *options)
320{
321 char *this_opt;
322
323 if (!options || !*options)
324 return 0;
325
326 while ((this_opt = strsep(&options, ",")) != NULL) {
327 if (!*this_opt)
328 continue;
329 mode_option = this_opt;
330 }
331 return 0;
332}
333
334void radeon_fb_output_poll_changed(struct radeon_device *rdev) 318void radeon_fb_output_poll_changed(struct radeon_device *rdev)
335{ 319{
336 drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); 320 drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 5cd47ff03e48..22bd6c2c2740 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -398,7 +398,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
398 return 0; 398 return 0;
399} 399}
400 400
401bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq) 401static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
402{ 402{
403 unsigned i; 403 unsigned i;
404 404
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 33cc03e310fd..f0c06d196b75 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -423,6 +423,18 @@ void radeon_gart_fini(struct radeon_device *rdev)
423 */ 423 */
424 424
425/** 425/**
426 * radeon_vm_directory_size - returns the size of the page directory in bytes
427 *
428 * @rdev: radeon_device pointer
429 *
430 * Calculate the size of the page directory in bytes (cayman+).
431 */
432static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
433{
434 return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;
435}
436
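
radeon_vm_directory_size() allots one 8-byte page-directory entry per block of max_pfn pages. A worked example with max_pfn = 1 << 20, the value set in radeon_device.c above; RADEON_VM_BLOCK_SIZE is assumed to be 9 here (512 pages per page table), since its definition is not part of this diff:

#include <stdio.h>

/* assumed value (512 pages per page table); not part of this diff */
#define RADEON_VM_BLOCK_SIZE 9

int main(void)
{
	unsigned max_pfn = 1 << 20;	/* set in radeon_device.c above */
	unsigned pd_bytes = (max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;

	/* (1 << 20 >> 9) * 8 = 2048 entries * 8 bytes = 16 KiB */
	printf("page directory: %u bytes (%u KiB)\n", pd_bytes, pd_bytes >> 10);
	return 0;
}
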
437/**
426 * radeon_vm_manager_init - init the vm manager 438 * radeon_vm_manager_init - init the vm manager
427 * 439 *
428 * @rdev: radeon_device pointer 440 * @rdev: radeon_device pointer
@@ -435,12 +447,15 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
435 struct radeon_vm *vm; 447 struct radeon_vm *vm;
436 struct radeon_bo_va *bo_va; 448 struct radeon_bo_va *bo_va;
437 int r; 449 int r;
450 unsigned size;
438 451
439 if (!rdev->vm_manager.enabled) { 452 if (!rdev->vm_manager.enabled) {
440 /* mark first vm as always in use, it's the system one */
441 /* allocate enough for 2 full VM pts */ 453 /* allocate enough for 2 full VM pts */
454 size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
455 size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8);
456 size *= 2;
442 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, 457 r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
443 rdev->vm_manager.max_pfn * 8 * 2, 458 size,
444 RADEON_GEM_DOMAIN_VRAM); 459 RADEON_GEM_DOMAIN_VRAM);
445 if (r) { 460 if (r) {
446 dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", 461 dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -448,10 +463,10 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
448 return r; 463 return r;
449 } 464 }
450 465
451 r = rdev->vm_manager.funcs->init(rdev); 466 r = radeon_asic_vm_init(rdev);
452 if (r) 467 if (r)
453 return r; 468 return r;
454 469
455 rdev->vm_manager.enabled = true; 470 rdev->vm_manager.enabled = true;
456 471
457 r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager); 472 r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
@@ -461,73 +476,36 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
461 476
462 /* restore page table */ 477 /* restore page table */
463 list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) { 478 list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
464 if (vm->id == -1) 479 if (vm->sa_bo == NULL)
465 continue; 480 continue;
466 481
467 list_for_each_entry(bo_va, &vm->va, vm_list) { 482 list_for_each_entry(bo_va, &vm->va, vm_list) {
468 struct ttm_mem_reg *mem = NULL;
469 if (bo_va->valid)
470 mem = &bo_va->bo->tbo.mem;
471
472 bo_va->valid = false; 483 bo_va->valid = false;
473 r = radeon_vm_bo_update_pte(rdev, vm, bo_va->bo, mem);
474 if (r) {
475 DRM_ERROR("Failed to update pte for vm %d!\n", vm->id);
476 }
477 }
478
479 r = rdev->vm_manager.funcs->bind(rdev, vm, vm->id);
480 if (r) {
481 DRM_ERROR("Failed to bind vm %d!\n", vm->id);
482 } 484 }
483 } 485 }
484 return 0; 486 return 0;
485} 487}
486 488
487/* global mutex must be lock */
488/** 489/**
489 * radeon_vm_unbind_locked - unbind a specific vm 490 * radeon_vm_free_pt - free the page table for a specific vm
490 * 491 *
491 * @rdev: radeon_device pointer 492 * @rdev: radeon_device pointer
492 * @vm: vm to unbind 493 * @vm: vm to free the page table of
493 * 494 *
494 * Unbind the requested vm (cayman+). 495 * Free the page table of a specific vm (cayman+).
495 * Wait for use of the VM to finish, then unbind the page table, 496 *
496 * and free the page table memory. 497 * Global and local mutex must be locked!
497 */ 498 */
498static void radeon_vm_unbind_locked(struct radeon_device *rdev, 499static void radeon_vm_free_pt(struct radeon_device *rdev,
499 struct radeon_vm *vm) 500 struct radeon_vm *vm)
500{ 501{
501 struct radeon_bo_va *bo_va; 502 struct radeon_bo_va *bo_va;
502 503
503 if (vm->id == -1) { 504 if (!vm->sa_bo)
504 return; 505 return;
505 }
506 506
507 /* wait for vm use to end */
508 while (vm->fence) {
509 int r;
510 r = radeon_fence_wait(vm->fence, false);
511 if (r)
512 DRM_ERROR("error while waiting for fence: %d\n", r);
513 if (r == -EDEADLK) {
514 mutex_unlock(&rdev->vm_manager.lock);
515 r = radeon_gpu_reset(rdev);
516 mutex_lock(&rdev->vm_manager.lock);
517 if (!r)
518 continue;
519 }
520 break;
521 }
522 radeon_fence_unref(&vm->fence);
523
524 /* hw unbind */
525 rdev->vm_manager.funcs->unbind(rdev, vm);
526 rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
527 list_del_init(&vm->list); 507 list_del_init(&vm->list);
528 vm->id = -1; 508 radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence);
529 radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
530 vm->pt = NULL;
531 509
532 list_for_each_entry(bo_va, &vm->va, vm_list) { 510 list_for_each_entry(bo_va, &vm->va, vm_list) {
533 bo_va->valid = false; 511 bo_va->valid = false;
@@ -544,16 +522,22 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
544void radeon_vm_manager_fini(struct radeon_device *rdev) 522void radeon_vm_manager_fini(struct radeon_device *rdev)
545{ 523{
546 struct radeon_vm *vm, *tmp; 524 struct radeon_vm *vm, *tmp;
525 int i;
547 526
548 if (!rdev->vm_manager.enabled) 527 if (!rdev->vm_manager.enabled)
549 return; 528 return;
550 529
551 mutex_lock(&rdev->vm_manager.lock); 530 mutex_lock(&rdev->vm_manager.lock);
552 /* unbind all active vm */ 531 /* free all allocated page tables */
553 list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) { 532 list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
554 radeon_vm_unbind_locked(rdev, vm); 533 mutex_lock(&vm->mutex);
534 radeon_vm_free_pt(rdev, vm);
535 mutex_unlock(&vm->mutex);
555 } 536 }
556 rdev->vm_manager.funcs->fini(rdev); 537 for (i = 0; i < RADEON_NUM_VM; ++i) {
538 radeon_fence_unref(&rdev->vm_manager.active[i]);
539 }
540 radeon_asic_vm_fini(rdev);
557 mutex_unlock(&rdev->vm_manager.lock); 541 mutex_unlock(&rdev->vm_manager.lock);
558 542
559 radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager); 543 radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
@@ -561,46 +545,34 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
561 rdev->vm_manager.enabled = false; 545 rdev->vm_manager.enabled = false;
562} 546}
563 547
564/* global mutex must be locked */
565/** 548/**
566 * radeon_vm_unbind - locked version of unbind 549 * radeon_vm_alloc_pt - allocates a page table for a VM
567 *
568 * @rdev: radeon_device pointer
569 * @vm: vm to unbind
570 *
571 * Locked version that wraps radeon_vm_unbind_locked (cayman+).
572 */
573void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
574{
575 mutex_lock(&vm->mutex);
576 radeon_vm_unbind_locked(rdev, vm);
577 mutex_unlock(&vm->mutex);
578}
579
580/* global and local mutex must be locked */
581/**
582 * radeon_vm_bind - bind a page table to a VMID
583 * 550 *
584 * @rdev: radeon_device pointer 551 * @rdev: radeon_device pointer
585 * @vm: vm to bind 552 * @vm: vm to allocate a page table for
586 * 553 *
587 * Bind the requested vm (cayman+). 554 * Allocate a page table for the requested vm (cayman+).
588 * Suballocate memory for the page table, allocate a VMID 555 * Also starts to populate the page table.
589 * and bind the page table to it, and finally start to populate
590 * the page table.
591 * Returns 0 for success, error for failure. 556 * Returns 0 for success, error for failure.
557 *
558 * Global and local mutex must be locked!
592 */ 559 */
593int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm) 560int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
594{ 561{
595 struct radeon_vm *vm_evict; 562 struct radeon_vm *vm_evict;
596 unsigned i; 563 int r;
597 int id = -1, r; 564 u64 *pd_addr;
565 int tables_size;
598 566
599 if (vm == NULL) { 567 if (vm == NULL) {
600 return -EINVAL; 568 return -EINVAL;
601 } 569 }
602 570
603 if (vm->id != -1) { 571 /* allocate enough to cover the current VM size */
572 tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
573 tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8);
574
575 if (vm->sa_bo != NULL) {
604 /* update lru */ 576 /* update lru */
605 list_del_init(&vm->list); 577 list_del_init(&vm->list);
606 list_add_tail(&vm->list, &rdev->vm_manager.lru_vm); 578 list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
@@ -609,98 +581,215 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
609 581
610retry: 582retry:
611 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo, 583 r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
612 RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8), 584 tables_size, RADEON_GPU_PAGE_SIZE, false);
613 RADEON_GPU_PAGE_SIZE, false); 585 if (r == -ENOMEM) {
614 if (r) {
615 if (list_empty(&rdev->vm_manager.lru_vm)) { 586 if (list_empty(&rdev->vm_manager.lru_vm)) {
616 return r; 587 return r;
617 } 588 }
618 vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list); 589 vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
619 radeon_vm_unbind(rdev, vm_evict); 590 mutex_lock(&vm_evict->mutex);
591 radeon_vm_free_pt(rdev, vm_evict);
592 mutex_unlock(&vm_evict->mutex);
620 goto retry; 593 goto retry;
594
595 } else if (r) {
596 return r;
621 } 597 }
622 vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
623 vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
624 memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
625 598
626retry_id: 599 pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo);
627 /* search for free vm */ 600 vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
628 for (i = 0; i < rdev->vm_manager.nvm; i++) { 601 memset(pd_addr, 0, tables_size);
629 if (!(rdev->vm_manager.use_bitmap & (1 << i))) { 602
630 id = i; 603 list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
631 break; 604 return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
605 &rdev->ring_tmp_bo.bo->tbo.mem);
606}
607
608/**
609 * radeon_vm_grab_id - allocate the next free VMID
610 *
611 * @rdev: radeon_device pointer
612 * @vm: vm to allocate id for
613 * @ring: ring we want to submit job to
614 *
615 * Allocate an id for the vm (cayman+).
616 * Returns the fence we need to sync to (if any).
617 *
618 * Global and local mutex must be locked!
619 */
620struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
621 struct radeon_vm *vm, int ring)
622{
623 struct radeon_fence *best[RADEON_NUM_RINGS] = {};
624 unsigned choices[2] = {};
625 unsigned i;
626
627 /* check if the id is still valid */
628 if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
629 return NULL;
630
631 /* we definitely need to flush */
632 radeon_fence_unref(&vm->last_flush);
633
634 /* skip over VMID 0, since it is the system VM */
635 for (i = 1; i < rdev->vm_manager.nvm; ++i) {
636 struct radeon_fence *fence = rdev->vm_manager.active[i];
637
638 if (fence == NULL) {
639 /* found a free one */
640 vm->id = i;
641 return NULL;
642 }
643
644 if (radeon_fence_is_earlier(fence, best[fence->ring])) {
645 best[fence->ring] = fence;
646 choices[fence->ring == ring ? 0 : 1] = i;
632 } 647 }
633 } 648 }
634 /* evict vm if necessary */ 649
635 if (id == -1) { 650 for (i = 0; i < 2; ++i) {
636 vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list); 651 if (choices[i]) {
637 radeon_vm_unbind(rdev, vm_evict); 652 vm->id = choices[i];
638 goto retry_id; 653 return rdev->vm_manager.active[choices[i]];
654 }
639 } 655 }
640 656
641 /* do hw bind */ 657 /* should never happen */
642 r = rdev->vm_manager.funcs->bind(rdev, vm, id); 658 BUG();
643 if (r) { 659 return NULL;
644 radeon_sa_bo_free(rdev, &vm->sa_bo, NULL); 660}
645 return r; 661
662/**
663 * radeon_vm_fence - remember fence for vm
664 *
665 * @rdev: radeon_device pointer
666 * @vm: vm we want to fence
667 * @fence: fence to remember
668 *
669 * Fence the vm (cayman+).
670 * Set the fence used to protect page table and id.
671 *
672 * Global and local mutex must be locked!
673 */
674void radeon_vm_fence(struct radeon_device *rdev,
675 struct radeon_vm *vm,
676 struct radeon_fence *fence)
677{
678 radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
679 rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
680
681 radeon_fence_unref(&vm->fence);
682 vm->fence = radeon_fence_ref(fence);
683}
684
685/**
686 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
687 *
688 * @vm: requested vm
689 * @bo: requested buffer object
690 *
691 * Find @bo inside the requested vm (cayman+).
692 * Search inside the @bos vm list for the requested vm
693 * Returns the found bo_va or NULL if none is found
694 *
695 * Object has to be reserved!
696 */
697struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
698 struct radeon_bo *bo)
699{
700 struct radeon_bo_va *bo_va;
701
702 list_for_each_entry(bo_va, &bo->va, bo_list) {
703 if (bo_va->vm == vm) {
704 return bo_va;
705 }
646 } 706 }
647 rdev->vm_manager.use_bitmap |= 1 << id; 707 return NULL;
648 vm->id = id;
649 list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
650 return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
651 &rdev->ring_tmp_bo.bo->tbo.mem);
652} 708}
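
radeon_vm_grab_id() above reuses the current VMID when its fence is still the active one, hands out any free id next, and otherwise evicts the id with the earliest fence, preferring an id whose fence was emitted on the submitting ring (choices[0]) so the wait is cheap. A simplified, runnable model of that selection; sequence numbers stand in for fences, and the kernel's per-fence-ring tracking is collapsed into two buckets:

#include <stdio.h>
#include <stdint.h>

#define NUM_VM 4

/* sequence numbers stand in for fences; 0 means the id is free */
struct act {
	uint64_t seq;
	int ring;
};

static struct act active[NUM_VM];

static int grab_id(int ring)
{
	uint64_t best[2] = { UINT64_MAX, UINT64_MAX };
	int choice[2] = { 0, 0 };	/* [0] = same ring, [1] = other */
	int i;

	for (i = 1; i < NUM_VM; i++) {	/* id 0 is the system VM */
		if (!active[i].seq)
			return i;	/* free id: no sync needed */
		int slot = (active[i].ring == ring) ? 0 : 1;
		if (active[i].seq < best[slot]) {
			best[slot] = active[i].seq;
			choice[slot] = i;
		}
	}
	return choice[0] ? choice[0] : choice[1];
}

int main(void)
{
	active[1] = (struct act){ 50, 0 };
	active[2] = (struct act){ 10, 1 };
	active[3] = (struct act){ 20, 0 };
	/* ring 0 prefers id 3: earliest fence among same-ring ids */
	printf("ring 0 grabs id %d\n", grab_id(0));
	return 0;
}
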
653 709
654/* object have to be reserved */
655/** 710/**
656 * radeon_vm_bo_add - add a bo to a specific vm 711 * radeon_vm_bo_add - add a bo to a specific vm
657 * 712 *
658 * @rdev: radeon_device pointer 713 * @rdev: radeon_device pointer
659 * @vm: requested vm 714 * @vm: requested vm
660 * @bo: radeon buffer object 715 * @bo: radeon buffer object
661 * @offset: requested offset of the buffer in the VM address space
662 * @flags: attributes of pages (read/write/valid/etc.)
663 * 716 *
664 * Add @bo into the requested vm (cayman+). 717 * Add @bo into the requested vm (cayman+).
665 * Add @bo to the list of bos associated with the vm and validate 718 * Add @bo to the list of bos associated with the vm
666 * the offset requested within the vm address space. 719 * Returns newly added bo_va or NULL for failure
667 * Returns 0 for success, error for failure. 720 *
721 * Object has to be reserved!
668 */ 722 */
669int radeon_vm_bo_add(struct radeon_device *rdev, 723struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
670 struct radeon_vm *vm, 724 struct radeon_vm *vm,
671 struct radeon_bo *bo, 725 struct radeon_bo *bo)
672 uint64_t offset,
673 uint32_t flags)
674{ 726{
675 struct radeon_bo_va *bo_va, *tmp; 727 struct radeon_bo_va *bo_va;
676 struct list_head *head;
677 uint64_t size = radeon_bo_size(bo), last_offset = 0;
678 unsigned last_pfn;
679 728
680 bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); 729 bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
681 if (bo_va == NULL) { 730 if (bo_va == NULL) {
682 return -ENOMEM; 731 return NULL;
683 } 732 }
684 bo_va->vm = vm; 733 bo_va->vm = vm;
685 bo_va->bo = bo; 734 bo_va->bo = bo;
686 bo_va->soffset = offset; 735 bo_va->soffset = 0;
687 bo_va->eoffset = offset + size; 736 bo_va->eoffset = 0;
688 bo_va->flags = flags; 737 bo_va->flags = 0;
689 bo_va->valid = false; 738 bo_va->valid = false;
739 bo_va->ref_count = 1;
690 INIT_LIST_HEAD(&bo_va->bo_list); 740 INIT_LIST_HEAD(&bo_va->bo_list);
691 INIT_LIST_HEAD(&bo_va->vm_list); 741 INIT_LIST_HEAD(&bo_va->vm_list);
692 /* make sure object fit at this offset */
693 if (bo_va->soffset >= bo_va->eoffset) {
694 kfree(bo_va);
695 return -EINVAL;
696 }
697 742
698 last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE; 743 mutex_lock(&vm->mutex);
699 if (last_pfn > rdev->vm_manager.max_pfn) { 744 list_add(&bo_va->vm_list, &vm->va);
700 kfree(bo_va); 745 list_add_tail(&bo_va->bo_list, &bo->va);
701 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", 746 mutex_unlock(&vm->mutex);
702 last_pfn, rdev->vm_manager.max_pfn); 747
703 return -EINVAL; 748 return bo_va;
749}
750
751/**
752 * radeon_vm_bo_set_addr - set bos virtual address inside a vm
753 *
754 * @rdev: radeon_device pointer
755 * @bo_va: bo_va to store the address
756 * @soffset: requested offset of the buffer in the VM address space
757 * @flags: attributes of pages (read/write/valid/etc.)
758 *
759 * Set offset of @bo_va (cayman+).
760 * Validate and set the offset requested within the vm address space.
761 * Returns 0 for success, error for failure.
762 *
763 * Object has to be reserved!
764 */
765int radeon_vm_bo_set_addr(struct radeon_device *rdev,
766 struct radeon_bo_va *bo_va,
767 uint64_t soffset,
768 uint32_t flags)
769{
770 uint64_t size = radeon_bo_size(bo_va->bo);
771 uint64_t eoffset, last_offset = 0;
772 struct radeon_vm *vm = bo_va->vm;
773 struct radeon_bo_va *tmp;
774 struct list_head *head;
775 unsigned last_pfn;
776
777 if (soffset) {
778 /* make sure object fit at this offset */
779 eoffset = soffset + size;
780 if (soffset >= eoffset) {
781 return -EINVAL;
782 }
783
784 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
785 if (last_pfn > rdev->vm_manager.max_pfn) {
786 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
787 last_pfn, rdev->vm_manager.max_pfn);
788 return -EINVAL;
789 }
790
791 } else {
792 eoffset = last_pfn = 0;
704 } 793 }
705 794
706 mutex_lock(&vm->mutex); 795 mutex_lock(&vm->mutex);
@@ -713,7 +802,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
713 if (last_pfn > vm->last_pfn) { 802 if (last_pfn > vm->last_pfn) {
714 /* grow va space 32M by 32M */ 803 /* grow va space 32M by 32M */
715 unsigned align = ((32 << 20) >> 12) - 1; 804 unsigned align = ((32 << 20) >> 12) - 1;
716 radeon_vm_unbind_locked(rdev, vm); 805 radeon_vm_free_pt(rdev, vm);
717 vm->last_pfn = (last_pfn + align) & ~align; 806 vm->last_pfn = (last_pfn + align) & ~align;
718 } 807 }
719 mutex_unlock(&rdev->vm_manager.lock); 808 mutex_unlock(&rdev->vm_manager.lock);
@@ -721,68 +810,60 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
721 head = &vm->va; 810 head = &vm->va;
722 last_offset = 0; 811 last_offset = 0;
723 list_for_each_entry(tmp, &vm->va, vm_list) { 812 list_for_each_entry(tmp, &vm->va, vm_list) {
724 if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) { 813 if (bo_va == tmp) {
814 /* skip over currently modified bo */
815 continue;
816 }
817
818 if (soffset >= last_offset && eoffset <= tmp->soffset) {
725 /* bo can be added before this one */ 819 /* bo can be added before this one */
726 break; 820 break;
727 } 821 }
728 if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) { 822 if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
729 /* bo and tmp overlap, invalid offset */ 823 /* bo and tmp overlap, invalid offset */
730 dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n", 824 dev_err(rdev->dev, "bo %p va 0x%08X conflicts with (bo %p 0x%08X 0x%08X)\n",
731 bo, (unsigned)bo_va->soffset, tmp->bo, 825 bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
732 (unsigned)tmp->soffset, (unsigned)tmp->eoffset); 826 (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
733 kfree(bo_va);
734 mutex_unlock(&vm->mutex); 827 mutex_unlock(&vm->mutex);
735 return -EINVAL; 828 return -EINVAL;
736 } 829 }
737 last_offset = tmp->eoffset; 830 last_offset = tmp->eoffset;
738 head = &tmp->vm_list; 831 head = &tmp->vm_list;
739 } 832 }
740 list_add(&bo_va->vm_list, head); 833
741 list_add_tail(&bo_va->bo_list, &bo->va); 834 bo_va->soffset = soffset;
835 bo_va->eoffset = eoffset;
836 bo_va->flags = flags;
837 bo_va->valid = false;
838 list_move(&bo_va->vm_list, head);
839
742 mutex_unlock(&vm->mutex); 840 mutex_unlock(&vm->mutex);
743 return 0; 841 return 0;
744} 842}
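
The list walk above rejects conflicting mappings with a half-open interval test. A minimal standalone sketch of just that test (the sk_ names are illustrative, not driver API):

/* Standalone sketch of the conflict test used in the list walk above:
 * VA ranges are half-open [soffset, eoffset), so two mappings conflict
 * iff each one starts before the other one ends. Not driver code. */
#include <stdint.h>
#include <stdio.h>

static int sk_va_conflict(uint64_t soffset, uint64_t eoffset,
                          uint64_t tmp_soffset, uint64_t tmp_eoffset)
{
	return eoffset > tmp_soffset && soffset < tmp_eoffset;
}

int main(void)
{
	/* back-to-back mappings are fine ... */
	printf("%d\n", sk_va_conflict(0x0000, 0x1000, 0x1000, 0x2000)); /* 0 */
	/* ... but even one byte of overlap is rejected */
	printf("%d\n", sk_va_conflict(0x0000, 0x1001, 0x1000, 0x2000)); /* 1 */
	return 0;
}
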
745 843
746/** 844/**
747 * radeon_vm_get_addr - get the physical address of the page 845 * radeon_vm_map_gart - get the physical address of a gart page
748 * 846 *
749 * @rdev: radeon_device pointer 847 * @rdev: radeon_device pointer
750 * @mem: ttm mem 848 * @addr: the unmapped addr
751 * @pfn: pfn
752 * 849 *
753 * Look up the physical address of the page that the pte resolves 850 * Look up the physical address of the page that the pte resolves
754 * to (cayman+). 851 * to (cayman+).
755 * Returns the physical address of the page. 852 * Returns the physical address of the page.
756 */ 853 */
757static u64 radeon_vm_get_addr(struct radeon_device *rdev, 854uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
758 struct ttm_mem_reg *mem,
759 unsigned pfn)
760{ 855{
761 u64 addr = 0; 856 uint64_t result;
762 857
763 switch (mem->mem_type) { 858 /* page table offset */
764 case TTM_PL_VRAM: 859 result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
765 addr = (mem->start << PAGE_SHIFT); 860
766 addr += pfn * RADEON_GPU_PAGE_SIZE; 861 /* in case cpu page size != gpu page size */
767 addr += rdev->vm_manager.vram_base_offset; 862 result |= addr & (~PAGE_MASK);
768 break; 863
769 case TTM_PL_TT: 864 return result;
770 /* offset inside page table */
771 addr = mem->start << PAGE_SHIFT;
772 addr += pfn * RADEON_GPU_PAGE_SIZE;
773 addr = addr >> PAGE_SHIFT;
774 /* page table offset */
775 addr = rdev->gart.pages_addr[addr];
776 /* in case cpu page size != gpu page size*/
777 addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
778 break;
779 default:
780 break;
781 }
782 return addr;
783} 865}
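
The new gart lookup above splits the address into a CPU-page index and an in-page offset. A runnable sketch of the same arithmetic, assuming 4 KiB CPU pages and a hypothetical pages_addr[] table (sk_ names are placeholders):

/* The DMA address of the backing CPU page is fetched by page index,
 * then the offset inside that page is OR'd back in, so GPU pages
 * smaller than CPU pages still resolve correctly. */
#include <stdint.h>
#include <stdio.h>

#define SK_PAGE_SHIFT 12
#define SK_PAGE_MASK  (~((uint64_t)(1 << SK_PAGE_SHIFT) - 1))

static uint64_t sk_map_gart(const uint64_t *pages_addr, uint64_t addr)
{
	uint64_t result = pages_addr[addr >> SK_PAGE_SHIFT]; /* page table offset */
	result |= addr & ~SK_PAGE_MASK; /* in case cpu page size != gpu page size */
	return result;
}

int main(void)
{
	uint64_t pages_addr[2] = { 0xabc000, 0xdef000 };
	/* page 1, offset 0x800 -> 0xdef800 */
	printf("0x%llx\n", (unsigned long long)sk_map_gart(pages_addr, 0x1800));
	return 0;
}
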
784 866
785/* object have to be reserved & global and local mutex must be locked */
786/** 867/**
787 * radeon_vm_bo_update_pte - map a bo into the vm page table 868 * radeon_vm_bo_update_pte - map a bo into the vm page table
788 * 869 *
@@ -793,103 +874,160 @@ static u64 radeon_vm_get_addr(struct radeon_device *rdev,
793 * 874 *
794 * Fill in the page table entries for @bo (cayman+). 875 * Fill in the page table entries for @bo (cayman+).
795 * Returns 0 for success, -EINVAL for failure. 876 * Returns 0 for success, -EINVAL for failure.
877 *
878 * Object has to be reserved and the global and local mutex must be locked!
796 */ 879 */
797int radeon_vm_bo_update_pte(struct radeon_device *rdev, 880int radeon_vm_bo_update_pte(struct radeon_device *rdev,
798 struct radeon_vm *vm, 881 struct radeon_vm *vm,
799 struct radeon_bo *bo, 882 struct radeon_bo *bo,
800 struct ttm_mem_reg *mem) 883 struct ttm_mem_reg *mem)
801{ 884{
885 unsigned ridx = rdev->asic->vm.pt_ring_index;
886 struct radeon_ring *ring = &rdev->ring[ridx];
887 struct radeon_semaphore *sem = NULL;
802 struct radeon_bo_va *bo_va; 888 struct radeon_bo_va *bo_va;
803 unsigned ngpu_pages, i; 889 unsigned nptes, npdes, ndw;
804 uint64_t addr = 0, pfn; 890 uint64_t pe, addr;
805 uint32_t flags; 891 uint64_t pfn;
892 int r;
806 893
807 /* nothing to do if vm isn't bound */ 894 /* nothing to do if vm isn't bound */
808 if (vm->id == -1) 895 if (vm->sa_bo == NULL)
809 return 0; 896 return 0;
810 897
811 bo_va = radeon_bo_va(bo, vm); 898 bo_va = radeon_vm_bo_find(vm, bo);
812 if (bo_va == NULL) { 899 if (bo_va == NULL) {
813 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); 900 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
814 return -EINVAL; 901 return -EINVAL;
815 } 902 }
816 903
817 if (bo_va->valid && mem) 904 if (!bo_va->soffset) {
905 dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
906 bo, vm);
907 return -EINVAL;
908 }
909
910 if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
818 return 0; 911 return 0;
819 912
820 ngpu_pages = radeon_bo_ngpu_pages(bo);
821 bo_va->flags &= ~RADEON_VM_PAGE_VALID; 913 bo_va->flags &= ~RADEON_VM_PAGE_VALID;
822 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; 914 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
823 if (mem) { 915 if (mem) {
916 addr = mem->start << PAGE_SHIFT;
824 if (mem->mem_type != TTM_PL_SYSTEM) { 917 if (mem->mem_type != TTM_PL_SYSTEM) {
825 bo_va->flags |= RADEON_VM_PAGE_VALID; 918 bo_va->flags |= RADEON_VM_PAGE_VALID;
826 bo_va->valid = true; 919 bo_va->valid = true;
827 } 920 }
828 if (mem->mem_type == TTM_PL_TT) { 921 if (mem->mem_type == TTM_PL_TT) {
829 bo_va->flags |= RADEON_VM_PAGE_SYSTEM; 922 bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
923 } else {
924 addr += rdev->vm_manager.vram_base_offset;
830 } 925 }
926 } else {
927 addr = 0;
928 bo_va->valid = false;
831 } 929 }
832 pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE; 930
833 flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags); 931 if (vm->fence && radeon_fence_signaled(vm->fence)) {
834 for (i = 0, addr = 0; i < ngpu_pages; i++) { 932 radeon_fence_unref(&vm->fence);
835 if (mem && bo_va->valid) { 933 }
836 addr = radeon_vm_get_addr(rdev, mem, i); 934
935 if (vm->fence && vm->fence->ring != ridx) {
936 r = radeon_semaphore_create(rdev, &sem);
937 if (r) {
938 return r;
837 } 939 }
838 rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
839 } 940 }
840 rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm); 941
942 /* estimate number of dw needed */
943 /* reserve 32 dwords of space for padding */
944 ndw = 32;
945
946 nptes = radeon_bo_ngpu_pages(bo);
947
948 pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE);
949
950 /* handle cases where a bo spans several pdes */
951 npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
952 (pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;
953
954 /* reserve space for one header for every 2k dwords */
955 ndw += (nptes >> 11) * 3;
956 /* reserve space for pte addresses */
957 ndw += nptes * 2;
958
959 /* reserve space for one header for every 2k dwords */
960 ndw += (npdes >> 11) * 3;
961 /* reserve space for pde addresses */
962 ndw += npdes * 2;
963
964 r = radeon_ring_lock(rdev, ring, ndw);
965 if (r) {
966 return r;
967 }
968
969 if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
970 radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
971 radeon_fence_note_sync(vm->fence, ridx);
972 }
973
974 /* update page table entries */
975 pe = vm->pd_gpu_addr;
976 pe += radeon_vm_directory_size(rdev);
977 pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8;
978
979 radeon_asic_vm_set_page(rdev, pe, addr, nptes,
980 RADEON_GPU_PAGE_SIZE, bo_va->flags);
981
982 /* update page directory entries */
983 addr = pe;
984
985 pe = vm->pd_gpu_addr;
986 pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8;
987
988 radeon_asic_vm_set_page(rdev, pe, addr, npdes,
989 RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID);
990
991 radeon_fence_unref(&vm->fence);
992 r = radeon_fence_emit(rdev, &vm->fence, ridx);
993 if (r) {
994 radeon_ring_unlock_undo(rdev, ring);
995 return r;
996 }
997 radeon_ring_unlock_commit(rdev, ring);
998 radeon_semaphore_free(rdev, &sem, vm->fence);
999 radeon_fence_unref(&vm->last_flush);
841 return 0; 1000 return 0;
842} 1001}
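
The size estimate in the function above is worth a worked example. The sketch below hard-codes RADEON_VM_BLOCK_SIZE = 9 (512 ptes per page-table block) as an assumption, to show how even a four-page mapping that straddles a block boundary needs two pde writes:

/* Worked sketch of the command-buffer size estimate above; constants
 * are assumed values, not taken from this patch. */
#include <stdio.h>

#define SK_VM_BLOCK_SIZE 9
#define SK_VM_PTE_COUNT  (1 << SK_VM_BLOCK_SIZE)
#define SK_ALIGN(v, a)   (((v) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned pfn = 510, nptes = 4; /* pages 510..513 span two pt blocks */
	unsigned npdes = (SK_ALIGN(pfn + nptes, SK_VM_PTE_COUNT) -
			  (pfn & ~(SK_VM_PTE_COUNT - 1))) >> SK_VM_BLOCK_SIZE;
	unsigned ndw = 32;           /* padding */
	ndw += (nptes >> 11) * 3;    /* one 3-dw header per 2k pte writes */
	ndw += nptes * 2;            /* two dwords per pte address */
	ndw += (npdes >> 11) * 3;    /* same scheme for the directory */
	ndw += npdes * 2;
	printf("npdes=%u ndw=%u\n", npdes, ndw); /* npdes=2 ndw=44 */
	return 0;
}
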
843 1002
844/* object have to be reserved */
845/** 1003/**
846 * radeon_vm_bo_rmv - remove a bo from a specific vm 1004 * radeon_vm_bo_rmv - remove a bo from a specific vm
847 * 1005 *
848 * @rdev: radeon_device pointer 1006 * @rdev: radeon_device pointer
849 * @vm: requested vm 1007 * @bo_va: requested bo_va
850 * @bo: radeon buffer object
851 * 1008 *
852 * Remove @bo from the requested vm (cayman+). 1009 * Remove @bo_va->bo from the requested vm (cayman+).
853 * Remove @bo from the list of bos associated with the vm and 1010 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
854 * remove the ptes for @bo in the page table. 1011 * remove the ptes for @bo_va in the page table.
855 * Returns 0 for success. 1012 * Returns 0 for success.
1013 *
1014 * Object has to be reserved!
856 */ 1015 */
857int radeon_vm_bo_rmv(struct radeon_device *rdev, 1016int radeon_vm_bo_rmv(struct radeon_device *rdev,
858 struct radeon_vm *vm, 1017 struct radeon_bo_va *bo_va)
859 struct radeon_bo *bo)
860{ 1018{
861 struct radeon_bo_va *bo_va;
862 int r; 1019 int r;
863 1020
864 bo_va = radeon_bo_va(bo, vm);
865 if (bo_va == NULL)
866 return 0;
867
868 /* wait for va use to end */
869 while (bo_va->fence) {
870 r = radeon_fence_wait(bo_va->fence, false);
871 if (r) {
872 DRM_ERROR("error while waiting for fence: %d\n", r);
873 }
874 if (r == -EDEADLK) {
875 r = radeon_gpu_reset(rdev);
876 if (!r)
877 continue;
878 }
879 break;
880 }
881 radeon_fence_unref(&bo_va->fence);
882
883 mutex_lock(&rdev->vm_manager.lock); 1021 mutex_lock(&rdev->vm_manager.lock);
884 mutex_lock(&vm->mutex); 1022 mutex_lock(&bo_va->vm->mutex);
885 radeon_vm_bo_update_pte(rdev, vm, bo, NULL); 1023 r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
886 mutex_unlock(&rdev->vm_manager.lock); 1024 mutex_unlock(&rdev->vm_manager.lock);
887 list_del(&bo_va->vm_list); 1025 list_del(&bo_va->vm_list);
888 mutex_unlock(&vm->mutex); 1026 mutex_unlock(&bo_va->vm->mutex);
889 list_del(&bo_va->bo_list); 1027 list_del(&bo_va->bo_list);
890 1028
891 kfree(bo_va); 1029 kfree(bo_va);
892 return 0; 1030 return r;
893} 1031}
894 1032
895/** 1033/**
@@ -925,27 +1063,23 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
925 */ 1063 */
926int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) 1064int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
927{ 1065{
1066 struct radeon_bo_va *bo_va;
928 int r; 1067 int r;
929 1068
930 vm->id = -1; 1069 vm->id = 0;
931 vm->fence = NULL; 1070 vm->fence = NULL;
1071 vm->last_pfn = 0;
932 mutex_init(&vm->mutex); 1072 mutex_init(&vm->mutex);
933 INIT_LIST_HEAD(&vm->list); 1073 INIT_LIST_HEAD(&vm->list);
934 INIT_LIST_HEAD(&vm->va); 1074 INIT_LIST_HEAD(&vm->va);
935 /* SI requires equal sized PTs for all VMs, so always set 1075
936 * last_pfn to max_pfn. cayman allows variable sized
937 * pts so we can grow then as needed. Once we switch
938 * to two level pts we can unify this again.
939 */
940 if (rdev->family >= CHIP_TAHITI)
941 vm->last_pfn = rdev->vm_manager.max_pfn;
942 else
943 vm->last_pfn = 0;
944 /* map the ib pool buffer at 0 in virtual address space, set 1076 /* map the ib pool buffer at 0 in virtual address space, set
945 * read only 1077 * read only
946 */ 1078 */
947 r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0, 1079 bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
948 RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); 1080 r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
1081 RADEON_VM_PAGE_READABLE |
1082 RADEON_VM_PAGE_SNOOPED);
949 return r; 1083 return r;
950} 1084}
951 1085
@@ -965,7 +1099,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
965 1099
966 mutex_lock(&rdev->vm_manager.lock); 1100 mutex_lock(&rdev->vm_manager.lock);
967 mutex_lock(&vm->mutex); 1101 mutex_lock(&vm->mutex);
968 radeon_vm_unbind_locked(rdev, vm); 1102 radeon_vm_free_pt(rdev, vm);
969 mutex_unlock(&rdev->vm_manager.lock); 1103 mutex_unlock(&rdev->vm_manager.lock);
970 1104
971 /* remove all bo at this point, none are busy anymore because unbind 1105 /* remove all bo at this point, none are busy anymore because unbind
@@ -973,10 +1107,9 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
973 */ 1107 */
974 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 1108 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
975 if (!r) { 1109 if (!r) {
976 bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm); 1110 bo_va = radeon_vm_bo_find(vm, rdev->ring_tmp_bo.bo);
977 list_del_init(&bo_va->bo_list); 1111 list_del_init(&bo_va->bo_list);
978 list_del_init(&bo_va->vm_list); 1112 list_del_init(&bo_va->vm_list);
979 radeon_fence_unref(&bo_va->fence);
980 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 1113 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
981 kfree(bo_va); 1114 kfree(bo_va);
982 } 1115 }
@@ -988,10 +1121,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
988 r = radeon_bo_reserve(bo_va->bo, false); 1121 r = radeon_bo_reserve(bo_va->bo, false);
989 if (!r) { 1122 if (!r) {
990 list_del_init(&bo_va->bo_list); 1123 list_del_init(&bo_va->bo_list);
991 radeon_fence_unref(&bo_va->fence);
992 radeon_bo_unreserve(bo_va->bo); 1124 radeon_bo_unreserve(bo_va->bo);
993 kfree(bo_va); 1125 kfree(bo_va);
994 } 1126 }
995 } 1127 }
1128 radeon_fence_unref(&vm->fence);
1129 radeon_fence_unref(&vm->last_flush);
996 mutex_unlock(&vm->mutex); 1130 mutex_unlock(&vm->mutex);
997} 1131}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 04c212da6f65..f38fbcc46935 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -123,6 +123,30 @@ void radeon_gem_fini(struct radeon_device *rdev)
123 */ 123 */
124int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) 124int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
125{ 125{
126 struct radeon_bo *rbo = gem_to_radeon_bo(obj);
127 struct radeon_device *rdev = rbo->rdev;
128 struct radeon_fpriv *fpriv = file_priv->driver_priv;
129 struct radeon_vm *vm = &fpriv->vm;
130 struct radeon_bo_va *bo_va;
131 int r;
132
133 if (rdev->family < CHIP_CAYMAN) {
134 return 0;
135 }
136
137 r = radeon_bo_reserve(rbo, false);
138 if (r) {
139 return r;
140 }
141
142 bo_va = radeon_vm_bo_find(vm, rbo);
143 if (!bo_va) {
144 bo_va = radeon_vm_bo_add(rdev, vm, rbo);
145 } else {
146 ++bo_va->ref_count;
147 }
148 radeon_bo_unreserve(rbo);
149
126 return 0; 150 return 0;
127} 151}
128 152
@@ -133,16 +157,25 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
133 struct radeon_device *rdev = rbo->rdev; 157 struct radeon_device *rdev = rbo->rdev;
134 struct radeon_fpriv *fpriv = file_priv->driver_priv; 158 struct radeon_fpriv *fpriv = file_priv->driver_priv;
135 struct radeon_vm *vm = &fpriv->vm; 159 struct radeon_vm *vm = &fpriv->vm;
160 struct radeon_bo_va *bo_va;
161 int r;
136 162
137 if (rdev->family < CHIP_CAYMAN) { 163 if (rdev->family < CHIP_CAYMAN) {
138 return; 164 return;
139 } 165 }
140 166
141 if (radeon_bo_reserve(rbo, false)) { 167 r = radeon_bo_reserve(rbo, true);
142 dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n"); 168 if (r) {
169 dev_err(rdev->dev, "leaking bo va because "
170 "we fail to reserve bo (%d)\n", r);
143 return; 171 return;
144 } 172 }
145 radeon_vm_bo_rmv(rdev, vm, rbo); 173 bo_va = radeon_vm_bo_find(vm, rbo);
174 if (bo_va) {
175 if (--bo_va->ref_count == 0) {
176 radeon_vm_bo_rmv(rdev, bo_va);
177 }
178 }
146 radeon_bo_unreserve(rbo); 179 radeon_bo_unreserve(rbo);
147} 180}
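
The open/close pair above turns per-file mappings into reference-counted objects: each GEM open takes a reference on the bo_va, and the mapping is only torn down when the last handle in that VM is closed. A toy sketch of the pairing (sk_ types are stand-ins, not the driver structs):

#include <stdio.h>

struct sk_bo_va { int ref_count; };

static void sk_open(struct sk_bo_va *bo_va)  { ++bo_va->ref_count; }
static int  sk_close(struct sk_bo_va *bo_va) { return --bo_va->ref_count == 0; }

int main(void)
{
	struct sk_bo_va bo_va = { .ref_count = 1 }; /* first open creates it */
	sk_open(&bo_va);                            /* second handle */
	printf("remove? %d\n", sk_close(&bo_va));   /* 0: still referenced */
	printf("remove? %d\n", sk_close(&bo_va));   /* 1: last close, rmv */
	return 0;
}
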
148 181
@@ -458,19 +491,24 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
458 drm_gem_object_unreference_unlocked(gobj); 491 drm_gem_object_unreference_unlocked(gobj);
459 return r; 492 return r;
460 } 493 }
494 bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
495 if (!bo_va) {
496 args->operation = RADEON_VA_RESULT_ERROR;
497 drm_gem_object_unreference_unlocked(gobj);
498 return -ENOENT;
499 }
500
461 switch (args->operation) { 501 switch (args->operation) {
462 case RADEON_VA_MAP: 502 case RADEON_VA_MAP:
463 bo_va = radeon_bo_va(rbo, &fpriv->vm); 503 if (bo_va->soffset) {
464 if (bo_va) {
465 args->operation = RADEON_VA_RESULT_VA_EXIST; 504 args->operation = RADEON_VA_RESULT_VA_EXIST;
466 args->offset = bo_va->soffset; 505 args->offset = bo_va->soffset;
467 goto out; 506 goto out;
468 } 507 }
469 r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo, 508 r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
470 args->offset, args->flags);
471 break; 509 break;
472 case RADEON_VA_UNMAP: 510 case RADEON_VA_UNMAP:
473 r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo); 511 r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
474 break; 512 break;
475 default: 513 default:
476 break; 514 break;
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index 8fc81a26438a..c180df8e84db 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
368#define compat_radeon_cp_setparam NULL 368#define compat_radeon_cp_setparam NULL
369#endif /* X86_64 || IA64 */ 369#endif /* X86_64 || IA64 */
370 370
371drm_ioctl_compat_t *radeon_compat_ioctls[] = { 371static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
372 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init, 372 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
373 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear, 373 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
374 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple, 374 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 9201992cee12..90374dd77960 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -99,7 +99,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
99 /* Disable *all* interrupts */ 99 /* Disable *all* interrupts */
100 for (i = 0; i < RADEON_NUM_RINGS; i++) 100 for (i = 0; i < RADEON_NUM_RINGS; i++)
101 atomic_set(&rdev->irq.ring_int[i], 0); 101 atomic_set(&rdev->irq.ring_int[i], 0);
102 rdev->irq.gui_idle = false;
103 for (i = 0; i < RADEON_MAX_HPD_PINS; i++) 102 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
104 rdev->irq.hpd[i] = false; 103 rdev->irq.hpd[i] = false;
105 for (i = 0; i < RADEON_MAX_CRTCS; i++) { 104 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
@@ -147,7 +146,6 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
147 /* Disable *all* interrupts */ 146 /* Disable *all* interrupts */
148 for (i = 0; i < RADEON_NUM_RINGS; i++) 147 for (i = 0; i < RADEON_NUM_RINGS; i++)
149 atomic_set(&rdev->irq.ring_int[i], 0); 148 atomic_set(&rdev->irq.ring_int[i], 0);
150 rdev->irq.gui_idle = false;
151 for (i = 0; i < RADEON_MAX_HPD_PINS; i++) 149 for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
152 rdev->irq.hpd[i] = false; 150 rdev->irq.hpd[i] = false;
153 for (i = 0; i < RADEON_MAX_CRTCS; i++) { 151 for (i = 0; i < RADEON_MAX_CRTCS; i++) {
@@ -204,6 +202,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
204 (rdev->pdev->subsystem_device == 0x01fd)) 202 (rdev->pdev->subsystem_device == 0x01fd))
205 return true; 203 return true;
206 204
205 /* Gateway RS690 only seems to work with MSIs. */
206 if ((rdev->pdev->device == 0x791f) &&
207 (rdev->pdev->subsystem_vendor == 0x107b) &&
208 (rdev->pdev->subsystem_device == 0x0185))
209 return true;
210
211 /* try to enable MSIs by default on all RS690s */
212 if (rdev->family == CHIP_RS690)
213 return true;
214
207 /* RV515 seems to have MSI issues where it loses 215 /* RV515 seems to have MSI issues where it loses
208 * MSI rearms occasionally. This leads to lockups and freezes. 216 * MSI rearms occasionally. This leads to lockups and freezes.
209 * disable it by default. 217 * disable it by default.
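
The new quirks follow the usual PCI subsystem-ID whitelist shape: match the device id plus subsystem vendor/device to single out one board, or whitelist a whole family. A reduced sketch (struct and helper are illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sk_pci_id { uint16_t device, sub_vendor, sub_device; };

static bool sk_msi_whitelisted(const struct sk_pci_id *id)
{
	/* the Gateway RS690 board from the hunk above */
	return id->device == 0x791f &&
	       id->sub_vendor == 0x107b && id->sub_device == 0x0185;
}

int main(void)
{
	struct sk_pci_id gw = { 0x791f, 0x107b, 0x0185 };
	printf("%d\n", sk_msi_whitelisted(&gw)); /* 1 */
	return 0;
}
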
@@ -457,34 +465,3 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
457 spin_unlock_irqrestore(&rdev->irq.lock, irqflags); 465 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
458} 466}
459 467
460/**
461 * radeon_irq_kms_wait_gui_idle - waits for drawing engine to be idle
462 *
463 * @rdev: radeon device pointer
464 *
465 * Enabled the GUI idle interrupt and waits for it to fire (r6xx+).
466 * This is currently used to make sure the 3D engine is idle for power
467 * management, but should be replaces with proper fence waits.
468 * GUI idle interrupts don't work very well on pre-r6xx hw and it also
469 * does not take into account other aspects of the chip that may be busy.
470 * DO NOT USE GOING FORWARD.
471 */
472int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
473{
474 unsigned long irqflags;
475 int r;
476
477 spin_lock_irqsave(&rdev->irq.lock, irqflags);
478 rdev->irq.gui_idle = true;
479 radeon_irq_set(rdev);
480 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
481
482 r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev),
483 msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
484
485 spin_lock_irqsave(&rdev->irq.lock, irqflags);
486 rdev->irq.gui_idle = false;
487 radeon_irq_set(rdev);
488 spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
489 return r;
490}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8a7f87f17c13..83b8d8aa71c0 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -50,6 +50,7 @@ int radeon_driver_unload_kms(struct drm_device *dev)
50 50
51 if (rdev == NULL) 51 if (rdev == NULL)
52 return 0; 52 return 0;
53 radeon_acpi_fini(rdev);
53 radeon_modeset_fini(rdev); 54 radeon_modeset_fini(rdev);
54 radeon_device_fini(rdev); 55 radeon_device_fini(rdev);
55 kfree(rdev); 56 kfree(rdev);
@@ -102,11 +103,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
102 goto out; 103 goto out;
103 } 104 }
104 105
105 /* Call ACPI methods */
106 acpi_status = radeon_acpi_init(rdev);
107 if (acpi_status)
108 dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
109
110 /* Again modeset_init should fail only on fatal error 106 /* Again modeset_init should fail only on fatal error
111 * otherwise it should provide enough functionalities 107 * otherwise it should provide enough functionalities
112 * for shadowfb to run 108 * for shadowfb to run
@@ -114,6 +110,17 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
114 r = radeon_modeset_init(rdev); 110 r = radeon_modeset_init(rdev);
115 if (r) 111 if (r)
116 dev_err(&dev->pdev->dev, "Fatal error during modeset init\n"); 112 dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
113
114 /* Call ACPI methods: these require modeset init
115 * but failure is not fatal
116 */
117 if (!r) {
118 acpi_status = radeon_acpi_init(rdev);
119 if (acpi_status)
120 dev_dbg(&dev->pdev->dev,
121 "Error during ACPI methods call\n");
122 }
123
117out: 124out:
118 if (r) 125 if (r)
119 radeon_driver_unload_kms(dev); 126 radeon_driver_unload_kms(dev);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 94b4a1c12893..5677a424b585 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -206,11 +206,6 @@ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
206 WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); 206 WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
207} 207}
208 208
209void radeon_restore_common_regs(struct drm_device *dev)
210{
211 /* don't need this yet */
212}
213
214static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev) 209static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
215{ 210{
216 struct radeon_device *rdev = dev->dev_private; 211 struct radeon_device *rdev = dev->dev_private;
@@ -295,7 +290,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div,
295 return 1; 290 return 1;
296} 291}
297 292
298void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) 293static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
299{ 294{
300 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 295 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
301 struct drm_device *dev = crtc->dev; 296 struct drm_device *dev = crtc->dev;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 757b08f07195..92487e614778 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -271,13 +271,6 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
271 271
272#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 272#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
273 273
274#define MAX_RADEON_LEVEL 0xFF
275
276struct radeon_backlight_privdata {
277 struct radeon_encoder *encoder;
278 uint8_t negative;
279};
280
281static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd) 274static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
282{ 275{
283 struct radeon_backlight_privdata *pdata = bl_get_data(bd); 276 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
@@ -286,21 +279,33 @@ static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
286 /* Convert brightness to hardware level */ 279 /* Convert brightness to hardware level */
287 if (bd->props.brightness < 0) 280 if (bd->props.brightness < 0)
288 level = 0; 281 level = 0;
289 else if (bd->props.brightness > MAX_RADEON_LEVEL) 282 else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
290 level = MAX_RADEON_LEVEL; 283 level = RADEON_MAX_BL_LEVEL;
291 else 284 else
292 level = bd->props.brightness; 285 level = bd->props.brightness;
293 286
294 if (pdata->negative) 287 if (pdata->negative)
295 level = MAX_RADEON_LEVEL - level; 288 level = RADEON_MAX_BL_LEVEL - level;
296 289
297 return level; 290 return level;
298} 291}
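
The conversion above clamps the requested brightness and optionally inverts it for panels wired with negative polarity. A standalone sketch with the 0xFF maximum hard-coded:

#include <stdint.h>
#include <stdio.h>

#define SK_MAX_BL_LEVEL 0xFF

static uint8_t sk_lvds_level(int brightness, int negative)
{
	uint8_t level;

	if (brightness < 0)
		level = 0;
	else if (brightness > SK_MAX_BL_LEVEL)
		level = SK_MAX_BL_LEVEL;
	else
		level = brightness;

	/* invert when the backlight is wired active-low */
	return negative ? SK_MAX_BL_LEVEL - level : level;
}

int main(void)
{
	printf("%u\n", sk_lvds_level(300, 0)); /* clamped to 255 */
	printf("%u\n", sk_lvds_level(64, 1));  /* inverted to 191 */
	return 0;
}
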
299 292
300static int radeon_legacy_backlight_update_status(struct backlight_device *bd) 293u8
294radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
295{
296 struct drm_device *dev = radeon_encoder->base.dev;
297 struct radeon_device *rdev = dev->dev_private;
298 u8 backlight_level;
299
300 backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
301 RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
302
303 return backlight_level;
304}
305
306void
307radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
301{ 308{
302 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
303 struct radeon_encoder *radeon_encoder = pdata->encoder;
304 struct drm_device *dev = radeon_encoder->base.dev; 309 struct drm_device *dev = radeon_encoder->base.dev;
305 struct radeon_device *rdev = dev->dev_private; 310 struct radeon_device *rdev = dev->dev_private;
306 int dpms_mode = DRM_MODE_DPMS_ON; 311 int dpms_mode = DRM_MODE_DPMS_ON;
@@ -308,19 +313,31 @@ static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
308 if (radeon_encoder->enc_priv) { 313 if (radeon_encoder->enc_priv) {
309 if (rdev->is_atom_bios) { 314 if (rdev->is_atom_bios) {
310 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv; 315 struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
311 dpms_mode = lvds->dpms_mode; 316 if (lvds->backlight_level > 0)
312 lvds->backlight_level = radeon_legacy_lvds_level(bd); 317 dpms_mode = lvds->dpms_mode;
318 else
319 dpms_mode = DRM_MODE_DPMS_OFF;
320 lvds->backlight_level = level;
313 } else { 321 } else {
314 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv; 322 struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
315 dpms_mode = lvds->dpms_mode; 323 if (lvds->backlight_level > 0)
316 lvds->backlight_level = radeon_legacy_lvds_level(bd); 324 dpms_mode = lvds->dpms_mode;
325 else
326 dpms_mode = DRM_MODE_DPMS_OFF;
327 lvds->backlight_level = level;
317 } 328 }
318 } 329 }
319 330
320 if (bd->props.brightness > 0) 331 radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
321 radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode); 332}
322 else 333
323 radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF); 334static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
335{
336 struct radeon_backlight_privdata *pdata = bl_get_data(bd);
337 struct radeon_encoder *radeon_encoder = pdata->encoder;
338
339 radeon_legacy_set_backlight_level(radeon_encoder,
340 radeon_legacy_lvds_level(bd));
324 341
325 return 0; 342 return 0;
326} 343}
@@ -336,7 +353,7 @@ static int radeon_legacy_backlight_get_brightness(struct backlight_device *bd)
336 backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >> 353 backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
337 RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff; 354 RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
338 355
339 return pdata->negative ? MAX_RADEON_LEVEL - backlight_level : backlight_level; 356 return pdata->negative ? RADEON_MAX_BL_LEVEL - backlight_level : backlight_level;
340} 357}
341 358
342static const struct backlight_ops radeon_backlight_ops = { 359static const struct backlight_ops radeon_backlight_ops = {
@@ -370,7 +387,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
370 } 387 }
371 388
372 memset(&props, 0, sizeof(props)); 389 memset(&props, 0, sizeof(props));
373 props.max_brightness = MAX_RADEON_LEVEL; 390 props.max_brightness = RADEON_MAX_BL_LEVEL;
374 props.type = BACKLIGHT_RAW; 391 props.type = BACKLIGHT_RAW;
375 bd = backlight_device_register("radeon_bl", &drm_connector->kdev, 392 bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
376 pdata, &radeon_backlight_ops, &props); 393 pdata, &radeon_backlight_ops, &props);
@@ -449,7 +466,7 @@ static void radeon_legacy_backlight_exit(struct radeon_encoder *radeon_encoder)
449 } 466 }
450 467
451 if (bd) { 468 if (bd) {
452 struct radeon_legacy_backlight_privdata *pdata; 469 struct radeon_backlight_privdata *pdata;
453 470
454 pdata = bl_get_data(bd); 471 pdata = bl_get_data(bd);
455 backlight_device_unregister(bd); 472 backlight_device_unregister(bd);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 0c28ca3964b1..92c5f473cf08 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -251,8 +251,23 @@ struct radeon_mode_info {
251 251
252 /* pointer to fbdev info structure */ 252 /* pointer to fbdev info structure */
253 struct radeon_fbdev *rfbdev; 253 struct radeon_fbdev *rfbdev;
254 /* firmware flags */
255 u16 firmware_flags;
256 /* pointer to backlight encoder */
257 struct radeon_encoder *bl_encoder;
254}; 258};
255 259
260#define RADEON_MAX_BL_LEVEL 0xFF
261
262#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
263
264struct radeon_backlight_privdata {
265 struct radeon_encoder *encoder;
266 uint8_t negative;
267};
268
269#endif
270
256#define MAX_H_CODE_TIMING_LEN 32 271#define MAX_H_CODE_TIMING_LEN 32
257#define MAX_V_CODE_TIMING_LEN 32 272#define MAX_V_CODE_TIMING_LEN 32
258 273
@@ -268,6 +283,18 @@ struct radeon_tv_regs {
268 uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN]; 283 uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN];
269}; 284};
270 285
286struct radeon_atom_ss {
287 uint16_t percentage;
288 uint8_t type;
289 uint16_t step;
290 uint8_t delay;
291 uint8_t range;
292 uint8_t refdiv;
293 /* asic_ss */
294 uint16_t rate;
295 uint16_t amount;
296};
297
271struct radeon_crtc { 298struct radeon_crtc {
272 struct drm_crtc base; 299 struct drm_crtc base;
273 int crtc_id; 300 int crtc_id;
@@ -292,6 +319,16 @@ struct radeon_crtc {
292 /* page flipping */ 319 /* page flipping */
293 struct radeon_unpin_work *unpin_work; 320 struct radeon_unpin_work *unpin_work;
294 int deferred_flip_completion; 321 int deferred_flip_completion;
322 /* pll sharing */
323 struct radeon_atom_ss ss;
324 bool ss_enabled;
325 u32 adjusted_clock;
326 int bpc;
327 u32 pll_reference_div;
328 u32 pll_post_div;
329 u32 pll_flags;
330 struct drm_encoder *encoder;
331 struct drm_connector *connector;
295}; 332};
296 333
297struct radeon_encoder_primary_dac { 334struct radeon_encoder_primary_dac {
@@ -345,18 +382,6 @@ struct radeon_encoder_ext_tmds {
345}; 382};
346 383
347/* spread spectrum */ 384/* spread spectrum */
348struct radeon_atom_ss {
349 uint16_t percentage;
350 uint8_t type;
351 uint16_t step;
352 uint8_t delay;
353 uint8_t range;
354 uint8_t refdiv;
355 /* asic_ss */
356 uint16_t rate;
357 uint16_t amount;
358};
359
360struct radeon_encoder_atom_dig { 385struct radeon_encoder_atom_dig {
361 bool linkb; 386 bool linkb;
362 /* atom dig */ 387 /* atom dig */
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 56ed724b398d..8b27dd6e3144 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -52,7 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo)
52 52
53 list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) { 53 list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
54 /* remove from all vm address space */ 54 /* remove from all vm address space */
55 radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo); 55 radeon_vm_bo_rmv(bo->rdev, bo_va);
56 } 56 }
57} 57}
58 58
@@ -627,18 +627,17 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
627/** 627/**
628 * radeon_bo_reserve - reserve bo 628 * radeon_bo_reserve - reserve bo
629 * @bo: bo structure 629 * @bo: bo structure
630 * @no_wait: don't sleep while trying to reserve (return -EBUSY) 630 * @no_intr: don't return -ERESTARTSYS on pending signal
631 * 631 *
632 * Returns: 632 * Returns:
633 * -EBUSY: buffer is busy and @no_wait is true
634 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by 633 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
635 * a signal. Release all buffer reservations and return to user-space. 634 * a signal. Release all buffer reservations and return to user-space.
636 */ 635 */
637int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait) 636int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
638{ 637{
639 int r; 638 int r;
640 639
641 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); 640 r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
642 if (unlikely(r != 0)) { 641 if (unlikely(r != 0)) {
643 if (r != -ERESTARTSYS) 642 if (r != -ERESTARTSYS)
644 dev_err(bo->rdev->dev, "%p reserve failed\n", bo); 643 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
@@ -646,16 +645,3 @@ int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
646 } 645 }
647 return 0; 646 return 0;
648} 647}
649
650/* object have to be reserved */
651struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
652{
653 struct radeon_bo_va *bo_va;
654
655 list_for_each_entry(bo_va, &rbo->va, bo_list) {
656 if (bo_va->vm == vm) {
657 return bo_va;
658 }
659 }
660 return NULL;
661}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 17fb99f177cf..93cd491fff2e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -52,7 +52,7 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
52 return 0; 52 return 0;
53} 53}
54 54
55int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait); 55int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr);
56 56
57static inline void radeon_bo_unreserve(struct radeon_bo *bo) 57static inline void radeon_bo_unreserve(struct radeon_bo *bo)
58{ 58{
@@ -141,8 +141,6 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
141 struct ttm_mem_reg *mem); 141 struct ttm_mem_reg *mem);
142extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); 142extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
143extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); 143extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
144extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
145 struct radeon_vm *vm);
146 144
147/* 145/*
148 * sub allocation 146 * sub allocation
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 3ef0319981d3..aa14dbb7e4fb 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -24,9 +24,6 @@
24#include "radeon.h" 24#include "radeon.h"
25#include "avivod.h" 25#include "avivod.h"
26#include "atom.h" 26#include "atom.h"
27#ifdef CONFIG_ACPI
28#include <linux/acpi.h>
29#endif
30#include <linux/power_supply.h> 27#include <linux/power_supply.h>
31#include <linux/hwmon.h> 28#include <linux/hwmon.h>
32#include <linux/hwmon-sysfs.h> 29#include <linux/hwmon-sysfs.h>
@@ -36,7 +33,7 @@
36#define RADEON_WAIT_VBLANK_TIMEOUT 200 33#define RADEON_WAIT_VBLANK_TIMEOUT 200
37 34
38static const char *radeon_pm_state_type_name[5] = { 35static const char *radeon_pm_state_type_name[5] = {
39 "Default", 36 "",
40 "Powersave", 37 "Powersave",
41 "Battery", 38 "Battery",
42 "Balanced", 39 "Balanced",
@@ -50,8 +47,6 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish
50static void radeon_pm_update_profile(struct radeon_device *rdev); 47static void radeon_pm_update_profile(struct radeon_device *rdev);
51static void radeon_pm_set_clocks(struct radeon_device *rdev); 48static void radeon_pm_set_clocks(struct radeon_device *rdev);
52 49
53#define ACPI_AC_CLASS "ac_adapter"
54
55int radeon_pm_get_type_index(struct radeon_device *rdev, 50int radeon_pm_get_type_index(struct radeon_device *rdev,
56 enum radeon_pm_state_type ps_type, 51 enum radeon_pm_state_type ps_type,
57 int instance) 52 int instance)
@@ -70,33 +65,17 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
70 return rdev->pm.default_power_state_index; 65 return rdev->pm.default_power_state_index;
71} 66}
72 67
73#ifdef CONFIG_ACPI 68void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
74static int radeon_acpi_event(struct notifier_block *nb,
75 unsigned long val,
76 void *data)
77{ 69{
78 struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb); 70 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
79 struct acpi_bus_event *entry = (struct acpi_bus_event *)data; 71 if (rdev->pm.profile == PM_PROFILE_AUTO) {
80 72 mutex_lock(&rdev->pm.mutex);
81 if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) { 73 radeon_pm_update_profile(rdev);
82 if (power_supply_is_system_supplied() > 0) 74 radeon_pm_set_clocks(rdev);
83 DRM_DEBUG_DRIVER("pm: AC\n"); 75 mutex_unlock(&rdev->pm.mutex);
84 else
85 DRM_DEBUG_DRIVER("pm: DC\n");
86
87 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
88 if (rdev->pm.profile == PM_PROFILE_AUTO) {
89 mutex_lock(&rdev->pm.mutex);
90 radeon_pm_update_profile(rdev);
91 radeon_pm_set_clocks(rdev);
92 mutex_unlock(&rdev->pm.mutex);
93 }
94 } 76 }
95 } 77 }
96
97 return NOTIFY_OK;
98} 78}
99#endif
100 79
101static void radeon_pm_update_profile(struct radeon_device *rdev) 80static void radeon_pm_update_profile(struct radeon_device *rdev)
102{ 81{
@@ -188,8 +167,21 @@ static void radeon_set_power_state(struct radeon_device *rdev)
188 if (sclk > rdev->pm.default_sclk) 167 if (sclk > rdev->pm.default_sclk)
189 sclk = rdev->pm.default_sclk; 168 sclk = rdev->pm.default_sclk;
190 169
191 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 170 /* starting with BTC, there is one state that is used for both
192 clock_info[rdev->pm.requested_clock_mode_index].mclk; 171 * MH and SH. Difference is that we always use the high clock index for
172 * mclk.
173 */
174 if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
175 (rdev->family >= CHIP_BARTS) &&
176 rdev->pm.active_crtc_count &&
177 ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
178 (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
179 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
180 clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
181 else
182 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
183 clock_info[rdev->pm.requested_clock_mode_index].mclk;
184
193 if (mclk > rdev->pm.default_mclk) 185 if (mclk > rdev->pm.default_mclk)
194 mclk = rdev->pm.default_mclk; 186 mclk = rdev->pm.default_mclk;
195 187
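
A reduced sketch of the clock choice above, with illustrative profile indices and clock values: the point is that BTC+ parts with active displays reuse the high profile's mclk index even in the mid/low multi-head profiles, since memory reclocking with heads active is not safe.

#include <stdio.h>

enum sk_profile { SK_LOW_MH, SK_MID_MH, SK_HIGH_MH };

static int sk_pick_mclk(int is_btc_plus, int active_crtcs, enum sk_profile p)
{
	const int mclk[] = { [SK_LOW_MH] = 400, [SK_MID_MH] = 800,
			     [SK_HIGH_MH] = 1200 }; /* made-up clocks, MHz */

	if (is_btc_plus && active_crtcs &&
	    (p == SK_MID_MH || p == SK_LOW_MH))
		return mclk[SK_HIGH_MH]; /* always the high mclk index */
	return mclk[p];
}

int main(void)
{
	printf("%d\n", sk_pick_mclk(1, 1, SK_LOW_MH)); /* 1200 */
	printf("%d\n", sk_pick_mclk(0, 1, SK_LOW_MH)); /* 400 */
	return 0;
}
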
@@ -253,18 +245,13 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
253 down_write(&rdev->pm.mclk_lock); 245 down_write(&rdev->pm.mclk_lock);
254 mutex_lock(&rdev->ring_lock); 246 mutex_lock(&rdev->ring_lock);
255 247
256 /* gui idle int has issues on older chips it seems */ 248 /* wait for the rings to drain */
257 if (rdev->family >= CHIP_R600) { 249 for (i = 0; i < RADEON_NUM_RINGS; i++) {
258 if (rdev->irq.installed) { 250 struct radeon_ring *ring = &rdev->ring[i];
259 /* wait for GPU to become idle */ 251 if (ring->ready)
260 radeon_irq_kms_wait_gui_idle(rdev); 252 radeon_fence_wait_empty_locked(rdev, i);
261 }
262 } else {
263 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
264 if (ring->ready) {
265 radeon_fence_wait_empty_locked(rdev, RADEON_RING_TYPE_GFX_INDEX);
266 }
267 } 253 }
254
268 radeon_unmap_vram_bos(rdev); 255 radeon_unmap_vram_bos(rdev);
269 256
270 if (rdev->irq.installed) { 257 if (rdev->irq.installed) {
@@ -320,17 +307,15 @@ static void radeon_pm_print_states(struct radeon_device *rdev)
320 for (j = 0; j < power_state->num_clock_modes; j++) { 307 for (j = 0; j < power_state->num_clock_modes; j++) {
321 clock_info = &(power_state->clock_info[j]); 308 clock_info = &(power_state->clock_info[j]);
322 if (rdev->flags & RADEON_IS_IGP) 309 if (rdev->flags & RADEON_IS_IGP)
323 DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n", 310 DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
324 j, 311 j,
325 clock_info->sclk * 10, 312 clock_info->sclk * 10);
326 clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
327 else 313 else
328 DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n", 314 DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
329 j, 315 j,
330 clock_info->sclk * 10, 316 clock_info->sclk * 10,
331 clock_info->mclk * 10, 317 clock_info->mclk * 10,
332 clock_info->voltage.voltage, 318 clock_info->voltage.voltage);
333 clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
334 } 319 }
335 } 320 }
336} 321}
@@ -547,7 +532,9 @@ void radeon_pm_suspend(struct radeon_device *rdev)
547void radeon_pm_resume(struct radeon_device *rdev) 532void radeon_pm_resume(struct radeon_device *rdev)
548{ 533{
549 /* set up the default clocks if the MC ucode is loaded */ 534 /* set up the default clocks if the MC ucode is loaded */
550 if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { 535 if ((rdev->family >= CHIP_BARTS) &&
536 (rdev->family <= CHIP_CAYMAN) &&
537 rdev->mc_fw) {
551 if (rdev->pm.default_vddc) 538 if (rdev->pm.default_vddc)
552 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 539 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
553 SET_VOLTAGE_TYPE_ASIC_VDDC); 540 SET_VOLTAGE_TYPE_ASIC_VDDC);
@@ -602,7 +589,9 @@ int radeon_pm_init(struct radeon_device *rdev)
602 radeon_pm_print_states(rdev); 589 radeon_pm_print_states(rdev);
603 radeon_pm_init_profile(rdev); 590 radeon_pm_init_profile(rdev);
604 /* set up the default clocks if the MC ucode is loaded */ 591 /* set up the default clocks if the MC ucode is loaded */
605 if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { 592 if ((rdev->family >= CHIP_BARTS) &&
593 (rdev->family <= CHIP_CAYMAN) &&
594 rdev->mc_fw) {
606 if (rdev->pm.default_vddc) 595 if (rdev->pm.default_vddc)
607 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 596 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
608 SET_VOLTAGE_TYPE_ASIC_VDDC); 597 SET_VOLTAGE_TYPE_ASIC_VDDC);
@@ -632,10 +621,6 @@ int radeon_pm_init(struct radeon_device *rdev)
632 if (ret) 621 if (ret)
633 DRM_ERROR("failed to create device file for power method\n"); 622 DRM_ERROR("failed to create device file for power method\n");
634 623
635#ifdef CONFIG_ACPI
636 rdev->acpi_nb.notifier_call = radeon_acpi_event;
637 register_acpi_notifier(&rdev->acpi_nb);
638#endif
639 if (radeon_debugfs_pm_init(rdev)) { 624 if (radeon_debugfs_pm_init(rdev)) {
640 DRM_ERROR("Failed to register debugfs file for PM!\n"); 625 DRM_ERROR("Failed to register debugfs file for PM!\n");
641 } 626 }
@@ -666,9 +651,6 @@ void radeon_pm_fini(struct radeon_device *rdev)
666 651
667 device_remove_file(rdev->dev, &dev_attr_power_profile); 652 device_remove_file(rdev->dev, &dev_attr_power_profile);
668 device_remove_file(rdev->dev, &dev_attr_power_method); 653 device_remove_file(rdev->dev, &dev_attr_power_method);
669#ifdef CONFIG_ACPI
670 unregister_acpi_notifier(&rdev->acpi_nb);
671#endif
672 } 654 }
673 655
674 if (rdev->pm.power_state) 656 if (rdev->pm.power_state)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index fc209c8b8666..bba66902c83b 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -43,7 +43,7 @@
43 * produce command buffers which are sent to the kernel and 43 * produce command buffers which are sent to the kernel and
44 * put in IBs for execution by the requested ring. 44 * put in IBs for execution by the requested ring.
45 */ 45 */
46int radeon_debugfs_sa_init(struct radeon_device *rdev); 46static int radeon_debugfs_sa_init(struct radeon_device *rdev);
47 47
48/** 48/**
49 * radeon_ib_get - request an IB (Indirect Buffer) 49 * radeon_ib_get - request an IB (Indirect Buffer)
@@ -58,7 +58,8 @@ int radeon_debugfs_sa_init(struct radeon_device *rdev);
58 * Returns 0 on success, error on failure. 58 * Returns 0 on success, error on failure.
59 */ 59 */
60int radeon_ib_get(struct radeon_device *rdev, int ring, 60int radeon_ib_get(struct radeon_device *rdev, int ring,
61 struct radeon_ib *ib, unsigned size) 61 struct radeon_ib *ib, struct radeon_vm *vm,
62 unsigned size)
62{ 63{
63 int i, r; 64 int i, r;
64 65
@@ -76,8 +77,15 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
76 ib->ring = ring; 77 ib->ring = ring;
77 ib->fence = NULL; 78 ib->fence = NULL;
78 ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo); 79 ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
79 ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo); 80 ib->vm = vm;
80 ib->vm_id = 0; 81 if (vm) {
82 /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
83 * space and soffset is the offset inside the pool bo
84 */
85 ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
86 } else {
87 ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
88 }
81 ib->is_const_ib = false; 89 ib->is_const_ib = false;
82 for (i = 0; i < RADEON_NUM_RINGS; ++i) 90 for (i = 0; i < RADEON_NUM_RINGS; ++i)
83 ib->sync_to[i] = NULL; 91 ib->sync_to[i] = NULL;
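
The address selection added to radeon_ib_get() above picks between a virtual and a physical address for the IB. A sketch with an assumed pool base of 0 (the real value of RADEON_VA_IB_OFFSET is not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

#define SK_VA_IB_OFFSET 0x0 /* assumed: IB pool mapped at VA 0 */

static uint64_t sk_ib_gpu_addr(int has_vm, uint64_t sa_soffset,
			       uint64_t sa_gpu_addr)
{
	/* inside a VM the sub-allocation offset is relative to the fixed
	 * IB virtual base; otherwise use the physical GART address */
	return has_vm ? sa_soffset + SK_VA_IB_OFFSET : sa_gpu_addr;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)sk_ib_gpu_addr(1, 0x200, 0));
	printf("0x%llx\n",
	       (unsigned long long)sk_ib_gpu_addr(0, 0x200, 0x80000200ULL));
	return 0;
}
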
@@ -152,6 +160,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
152 if (!need_sync) { 160 if (!need_sync) {
153 radeon_semaphore_free(rdev, &ib->semaphore, NULL); 161 radeon_semaphore_free(rdev, &ib->semaphore, NULL);
154 } 162 }
163 /* if we can't remember our last VM flush then flush now! */
164 if (ib->vm && !ib->vm->last_flush) {
165 radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
166 }
155 if (const_ib) { 167 if (const_ib) {
156 radeon_ring_ib_execute(rdev, const_ib->ring, const_ib); 168 radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
157 radeon_semaphore_free(rdev, &const_ib->semaphore, NULL); 169 radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
@@ -166,6 +178,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
166 if (const_ib) { 178 if (const_ib) {
167 const_ib->fence = radeon_fence_ref(ib->fence); 179 const_ib->fence = radeon_fence_ref(ib->fence);
168 } 180 }
181 /* we just flushed the VM, remember that */
182 if (ib->vm && !ib->vm->last_flush) {
183 ib->vm->last_flush = radeon_fence_ref(ib->fence);
184 }
169 radeon_ring_unlock_commit(rdev, ring); 185 radeon_ring_unlock_commit(rdev, ring);
170 return 0; 186 return 0;
171} 187}
@@ -275,7 +291,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
275 * wptr. The GPU then starts fetching commands and executes 291 * wptr. The GPU then starts fetching commands and executes
276 * them until the pointers are equal again. 292 * them until the pointers are equal again.
277 */ 293 */
278int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring); 294static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
279 295
280/** 296/**
281 * radeon_ring_write - write a value to the ring 297 * radeon_ring_write - write a value to the ring
@@ -803,7 +819,7 @@ static struct drm_info_list radeon_debugfs_sa_list[] = {
803 819
804#endif 820#endif
805 821
806int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring) 822static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
807{ 823{
808#if defined(CONFIG_DEBUG_FS) 824#if defined(CONFIG_DEBUG_FS)
809 unsigned i; 825 unsigned i;
@@ -823,7 +839,7 @@ int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *rin
823 return 0; 839 return 0;
824} 840}
825 841
826int radeon_debugfs_sa_init(struct radeon_device *rdev) 842static int radeon_debugfs_sa_init(struct radeon_device *rdev)
827{ 843{
828#if defined(CONFIG_DEBUG_FS) 844#if defined(CONFIG_DEBUG_FS)
829 return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1); 845 return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 83e889b9420b..cb800995d4f9 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -315,7 +315,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
315{ 315{
316 struct radeon_fence *fences[RADEON_NUM_RINGS]; 316 struct radeon_fence *fences[RADEON_NUM_RINGS];
317 unsigned tries[RADEON_NUM_RINGS]; 317 unsigned tries[RADEON_NUM_RINGS];
318 int i, r = -ENOMEM; 318 int i, r;
319 319
320 BUG_ON(align > RADEON_GPU_PAGE_SIZE); 320 BUG_ON(align > RADEON_GPU_PAGE_SIZE);
321 BUG_ON(size > sa_manager->size); 321 BUG_ON(size > sa_manager->size);
@@ -330,7 +330,7 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
330 INIT_LIST_HEAD(&(*sa_bo)->flist); 330 INIT_LIST_HEAD(&(*sa_bo)->flist);
331 331
332 spin_lock(&sa_manager->wq.lock); 332 spin_lock(&sa_manager->wq.lock);
333 while(1) { 333 do {
334 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 334 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
335 fences[i] = NULL; 335 fences[i] = NULL;
336 tries[i] = 0; 336 tries[i] = 0;
@@ -348,26 +348,22 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
348 /* see if we can skip over some allocations */ 348 /* see if we can skip over some allocations */
349 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); 349 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
350 350
351 if (!block) {
352 break;
353 }
354
355 spin_unlock(&sa_manager->wq.lock); 351 spin_unlock(&sa_manager->wq.lock);
356 r = radeon_fence_wait_any(rdev, fences, false); 352 r = radeon_fence_wait_any(rdev, fences, false);
357 spin_lock(&sa_manager->wq.lock); 353 spin_lock(&sa_manager->wq.lock);
358 /* if we have nothing to wait for, block */ 354 /* if we have nothing to wait for, block */
359 if (r == -ENOENT) { 355 if (r == -ENOENT && block) {
360 r = wait_event_interruptible_locked( 356 r = wait_event_interruptible_locked(
361 sa_manager->wq, 357 sa_manager->wq,
362 radeon_sa_event(sa_manager, size, align) 358 radeon_sa_event(sa_manager, size, align)
363 ); 359 );
360
361 } else if (r == -ENOENT) {
362 r = -ENOMEM;
364 } 363 }
365 if (r) {
366 goto out_err;
367 }
368 };
369 364
370out_err: 365 } while (!r);
366
371 spin_unlock(&sa_manager->wq.lock); 367 spin_unlock(&sa_manager->wq.lock);
372 kfree(*sa_bo); 368 kfree(*sa_bo);
373 *sa_bo = NULL; 369 *sa_bo = NULL;
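
The reworked loop is easier to see with the sub-allocator internals stubbed out: try to allocate, otherwise wait on the collected fences; with nothing to wait on, either sleep on the waitqueue (blocking caller) or fail with -ENOMEM. A runnable reduction (the sk_ helpers are fakes that let the allocation succeed on the third attempt):

#include <stdio.h>

#define SK_ENOENT 2
#define SK_ENOMEM 12

static int attempts;
static int sk_try_alloc(void)   { return ++attempts >= 3; }
static int sk_wait_fences(void) { return attempts == 1 ? 0 : -SK_ENOENT; }

static int sk_sa_new(int block)
{
	int r;

	do {
		if (sk_try_alloc())
			return 0;
		r = sk_wait_fences();
		if (r == -SK_ENOENT && block)
			r = 0;          /* slept on the wq, retry */
		else if (r == -SK_ENOENT)
			r = -SK_ENOMEM; /* non-blocking caller: give up */
	} while (!r);

	return r;
}

int main(void)
{
	printf("%d\n", sk_sa_new(1)); /* 0: succeeds after retries */
	return 0;
}
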
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 7c16540c10ff..587c09a00ba2 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -313,7 +313,7 @@ out_cleanup:
313 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r); 313 printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
314} 314}
315 315
316void radeon_test_ring_sync2(struct radeon_device *rdev, 316static void radeon_test_ring_sync2(struct radeon_device *rdev,
317 struct radeon_ring *ringA, 317 struct radeon_ring *ringA,
318 struct radeon_ring *ringB, 318 struct radeon_ring *ringB,
319 struct radeon_ring *ringC) 319 struct radeon_ring *ringC)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5b71c716d83f..5ebe1b3e5db2 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -549,7 +549,7 @@ static struct ttm_backend_func radeon_backend_func = {
549 .destroy = &radeon_ttm_backend_destroy, 549 .destroy = &radeon_ttm_backend_destroy,
550}; 550};
551 551
552struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev, 552static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
553 unsigned long size, uint32_t page_flags, 553 unsigned long size, uint32_t page_flags,
554 struct page *dummy_read_page) 554 struct page *dummy_read_page)
555{ 555{
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 2752f7f78237..73051ce3121e 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -242,7 +242,7 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
242 return -1; 242 return -1;
243} 243}
244 244
245void rs400_gpu_init(struct radeon_device *rdev) 245static void rs400_gpu_init(struct radeon_device *rdev)
246{ 246{
247 /* FIXME: is this correct ? */ 247 /* FIXME: is this correct ? */
248 r420_pipes_init(rdev); 248 r420_pipes_init(rdev);
@@ -252,7 +252,7 @@ void rs400_gpu_init(struct radeon_device *rdev)
252 } 252 }
253} 253}
254 254
255void rs400_mc_init(struct radeon_device *rdev) 255static void rs400_mc_init(struct radeon_device *rdev)
256{ 256{
257 u64 base; 257 u64 base;
258 258
@@ -370,7 +370,7 @@ static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
370#endif 370#endif
371} 371}
372 372
373void rs400_mc_program(struct radeon_device *rdev) 373static void rs400_mc_program(struct radeon_device *rdev)
374{ 374{
375 struct r100_mc_save save; 375 struct r100_mc_save save;
376 376
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6590cc128f36..5a0fc74c2ba6 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -43,22 +43,30 @@
43 43
44#include "rs600_reg_safe.h" 44#include "rs600_reg_safe.h"
45 45
46void rs600_gpu_init(struct radeon_device *rdev); 46static void rs600_gpu_init(struct radeon_device *rdev);
47int rs600_mc_wait_for_idle(struct radeon_device *rdev); 47int rs600_mc_wait_for_idle(struct radeon_device *rdev);
48 48
49static const u32 crtc_offsets[2] =
50{
51 0,
52 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
53};
54
49void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc) 55void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
50{ 56{
51 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
52 int i; 57 int i;
53 58
54 if (RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset) & AVIVO_CRTC_EN) { 59 if (crtc >= rdev->num_crtc)
60 return;
61
62 if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) {
55 for (i = 0; i < rdev->usec_timeout; i++) { 63 for (i = 0; i < rdev->usec_timeout; i++) {
56 if (!(RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK)) 64 if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK))
57 break; 65 break;
58 udelay(1); 66 udelay(1);
59 } 67 }
60 for (i = 0; i < rdev->usec_timeout; i++) { 68 for (i = 0; i < rdev->usec_timeout; i++) {
61 if (RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK) 69 if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
62 break; 70 break;
63 udelay(1); 71 udelay(1);
64 } 72 }
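
The crtc_offsets[] table introduced above derives the per-CRTC register stride from two known register addresses instead of dereferencing a possibly-uninitialized mode_info.crtcs[] entry. Sketch with placeholder register values:

#include <stdint.h>
#include <stdio.h>

#define SK_D1CRTC_H_TOTAL 0x6000 /* illustrative addresses */
#define SK_D2CRTC_H_TOTAL 0x6800
#define SK_D1CRTC_CONTROL 0x6080

static const uint32_t sk_crtc_offsets[2] = {
	0,
	SK_D2CRTC_H_TOTAL - SK_D1CRTC_H_TOTAL, /* stride between crtc blocks */
};

int main(void)
{
	for (int crtc = 0; crtc < 2; crtc++)
		printf("CRTC%d control reg at 0x%x\n",
		       crtc, SK_D1CRTC_CONTROL + sk_crtc_offsets[crtc]);
	return 0;
}
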
@@ -424,7 +432,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
424 tmp = RREG32_MC(R_000100_MC_PT0_CNTL); 432 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
425} 433}
426 434
427int rs600_gart_init(struct radeon_device *rdev) 435static int rs600_gart_init(struct radeon_device *rdev)
428{ 436{
429 int r; 437 int r;
430 438
@@ -506,7 +514,7 @@ static int rs600_gart_enable(struct radeon_device *rdev)
506 return 0; 514 return 0;
507} 515}
508 516
509void rs600_gart_disable(struct radeon_device *rdev) 517static void rs600_gart_disable(struct radeon_device *rdev)
510{ 518{
511 u32 tmp; 519 u32 tmp;
512 520
@@ -517,7 +525,7 @@ void rs600_gart_disable(struct radeon_device *rdev)
517 radeon_gart_table_vram_unpin(rdev); 525 radeon_gart_table_vram_unpin(rdev);
518} 526}
519 527
520void rs600_gart_fini(struct radeon_device *rdev) 528static void rs600_gart_fini(struct radeon_device *rdev)
521{ 529{
522 radeon_gart_fini(rdev); 530 radeon_gart_fini(rdev);
523 rs600_gart_disable(rdev); 531 rs600_gart_disable(rdev);
@@ -567,9 +575,6 @@ int rs600_irq_set(struct radeon_device *rdev)
567 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 575 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
568 tmp |= S_000040_SW_INT_EN(1); 576 tmp |= S_000040_SW_INT_EN(1);
569 } 577 }
570 if (rdev->irq.gui_idle) {
571 tmp |= S_000040_GUI_IDLE(1);
572 }
573 if (rdev->irq.crtc_vblank_int[0] || 578 if (rdev->irq.crtc_vblank_int[0] ||
574 atomic_read(&rdev->irq.pflip[0])) { 579 atomic_read(&rdev->irq.pflip[0])) {
575 mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); 580 mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
@@ -602,12 +607,6 @@ static inline u32 rs600_irq_ack(struct radeon_device *rdev)
602 uint32_t irq_mask = S_000044_SW_INT(1); 607 uint32_t irq_mask = S_000044_SW_INT(1);
603 u32 tmp; 608 u32 tmp;
604 609
605 /* the interrupt works, but the status bit is permanently asserted */
606 if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
607 if (!rdev->irq.gui_idle_acked)
608 irq_mask |= S_000044_GUI_IDLE_STAT(1);
609 }
610
611 if (G_000044_DISPLAY_INT_STAT(irqs)) { 610 if (G_000044_DISPLAY_INT_STAT(irqs)) {
612 rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); 611 rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
613 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { 612 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
@@ -667,9 +666,6 @@ int rs600_irq_process(struct radeon_device *rdev)
667 bool queue_hotplug = false; 666 bool queue_hotplug = false;
668 bool queue_hdmi = false; 667 bool queue_hdmi = false;
669 668
670 /* reset gui idle ack. the status bit is broken */
671 rdev->irq.gui_idle_acked = false;
672
673 status = rs600_irq_ack(rdev); 669 status = rs600_irq_ack(rdev);
674 if (!status && 670 if (!status &&
675 !rdev->irq.stat_regs.r500.disp_int && 671 !rdev->irq.stat_regs.r500.disp_int &&
@@ -683,11 +679,6 @@ int rs600_irq_process(struct radeon_device *rdev)
683 if (G_000044_SW_INT(status)) { 679 if (G_000044_SW_INT(status)) {
684 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 680 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
685 } 681 }
686 /* GUI idle */
687 if (G_000040_GUI_IDLE(status)) {
688 rdev->irq.gui_idle_acked = true;
689 wake_up(&rdev->irq.idle_queue);
690 }
691 /* Vertical blank interrupts */ 682 /* Vertical blank interrupts */
692 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { 683 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
693 if (rdev->irq.crtc_vblank_int[0]) { 684 if (rdev->irq.crtc_vblank_int[0]) {
@@ -721,8 +712,6 @@ int rs600_irq_process(struct radeon_device *rdev)
721 } 712 }
722 status = rs600_irq_ack(rdev); 713 status = rs600_irq_ack(rdev);
723 } 714 }
724 /* reset gui idle ack. the status bit is broken */
725 rdev->irq.gui_idle_acked = false;
726 if (queue_hotplug) 715 if (queue_hotplug)
727 schedule_work(&rdev->hotplug_work); 716 schedule_work(&rdev->hotplug_work);
728 if (queue_hdmi) 717 if (queue_hdmi)
@@ -764,7 +753,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
764 return -1; 753 return -1;
765} 754}
766 755
767void rs600_gpu_init(struct radeon_device *rdev) 756static void rs600_gpu_init(struct radeon_device *rdev)
768{ 757{
769 r420_pipes_init(rdev); 758 r420_pipes_init(rdev);
770 /* Wait for mc idle */ 759 /* Wait for mc idle */
@@ -772,7 +761,7 @@ void rs600_gpu_init(struct radeon_device *rdev)
772 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); 761 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
773} 762}
774 763
775void rs600_mc_init(struct radeon_device *rdev) 764static void rs600_mc_init(struct radeon_device *rdev)
776{ 765{
777 u64 base; 766 u64 base;
778 767
@@ -834,7 +823,7 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
834 WREG32(R_000074_MC_IND_DATA, v); 823 WREG32(R_000074_MC_IND_DATA, v);
835} 824}
836 825
837void rs600_debugfs(struct radeon_device *rdev) 826static void rs600_debugfs(struct radeon_device *rdev)
838{ 827{
839 if (r100_debugfs_rbbm_init(rdev)) 828 if (r100_debugfs_rbbm_init(rdev))
840 DRM_ERROR("Failed to register debugfs file for RBBM !\n"); 829 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
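
The remaining rs600 hunks, like the rs690/rv515/rv770 ones below, only narrow linkage: helpers now used solely within their own translation unit become static. Note that the forward declarations have to change together with the definitions; a sketch of why, under C linkage rules:

/* GCC rejects this pairing with
 * "static declaration of 'rs600_gpu_init' follows non-static declaration",
 * which is why the prototypes at the top of each file change in the same hunk */
void rs600_gpu_init(struct radeon_device *rdev);	/* old prototype */
static void rs600_gpu_init(struct radeon_device *rdev) { /* ... */ }
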
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index dfb9f0fe6f38..5706d2ac75ab 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -145,7 +145,7 @@ void rs690_pm_info(struct radeon_device *rdev)
145 rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp); 145 rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
146} 146}
147 147
148void rs690_mc_init(struct radeon_device *rdev) 148static void rs690_mc_init(struct radeon_device *rdev)
149{ 149{
150 u64 base; 150 u64 base;
151 151
@@ -224,7 +224,7 @@ struct rs690_watermark {
224 fixed20_12 sclk; 224 fixed20_12 sclk;
225}; 225};
226 226
227void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, 227static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
228 struct radeon_crtc *crtc, 228 struct radeon_crtc *crtc,
229 struct rs690_watermark *wm) 229 struct rs690_watermark *wm)
230{ 230{
@@ -581,7 +581,7 @@ void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
581 WREG32(R_000078_MC_INDEX, 0x7F); 581 WREG32(R_000078_MC_INDEX, 0x7F);
582} 582}
583 583
584void rs690_mc_program(struct radeon_device *rdev) 584static void rs690_mc_program(struct radeon_device *rdev)
585{ 585{
586 struct rv515_mc_save save; 586 struct rv515_mc_save save;
587 587
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ae4f93e2f135..785d09590b24 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -35,9 +35,9 @@
35#include "rv515_reg_safe.h" 35#include "rv515_reg_safe.h"
36 36
 37/* This file gathers functions specific to rv515 */ 37/* This file gathers functions specific to rv515 */
38int rv515_debugfs_pipes_info_init(struct radeon_device *rdev); 38static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
39int rv515_debugfs_ga_info_init(struct radeon_device *rdev); 39static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
40void rv515_gpu_init(struct radeon_device *rdev); 40static void rv515_gpu_init(struct radeon_device *rdev);
41int rv515_mc_wait_for_idle(struct radeon_device *rdev); 41int rv515_mc_wait_for_idle(struct radeon_device *rdev);
42 42
43void rv515_debugfs(struct radeon_device *rdev) 43void rv515_debugfs(struct radeon_device *rdev)
@@ -143,7 +143,7 @@ void rv515_vga_render_disable(struct radeon_device *rdev)
143 RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); 143 RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
144} 144}
145 145
146void rv515_gpu_init(struct radeon_device *rdev) 146static void rv515_gpu_init(struct radeon_device *rdev)
147{ 147{
148 unsigned pipe_select_current, gb_pipe_select, tmp; 148 unsigned pipe_select_current, gb_pipe_select, tmp;
149 149
@@ -189,7 +189,7 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
189 } 189 }
190} 190}
191 191
192void rv515_mc_init(struct radeon_device *rdev) 192static void rv515_mc_init(struct radeon_device *rdev)
193{ 193{
194 194
195 rv515_vram_get_type(rdev); 195 rv515_vram_get_type(rdev);
@@ -261,7 +261,7 @@ static struct drm_info_list rv515_ga_info_list[] = {
261}; 261};
262#endif 262#endif
263 263
264int rv515_debugfs_pipes_info_init(struct radeon_device *rdev) 264static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
265{ 265{
266#if defined(CONFIG_DEBUG_FS) 266#if defined(CONFIG_DEBUG_FS)
267 return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1); 267 return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
@@ -270,7 +270,7 @@ int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
270#endif 270#endif
271} 271}
272 272
273int rv515_debugfs_ga_info_init(struct radeon_device *rdev) 273static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
274{ 274{
275#if defined(CONFIG_DEBUG_FS) 275#if defined(CONFIG_DEBUG_FS)
276 return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1); 276 return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
@@ -310,7 +310,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
310 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); 310 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
311} 311}
312 312
313void rv515_mc_program(struct radeon_device *rdev) 313static void rv515_mc_program(struct radeon_device *rdev)
314{ 314{
315 struct rv515_mc_save save; 315 struct rv515_mc_save save;
316 316
@@ -787,7 +787,7 @@ struct rv515_watermark {
787 fixed20_12 sclk; 787 fixed20_12 sclk;
788}; 788};
789 789
790void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, 790static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
791 struct radeon_crtc *crtc, 791 struct radeon_crtc *crtc,
792 struct rv515_watermark *wm) 792 struct rv515_watermark *wm)
793{ 793{
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 35a4152bb1ad..79814a08c8e5 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -124,7 +124,7 @@ void rv770_pm_misc(struct radeon_device *rdev)
124/* 124/*
125 * GART 125 * GART
126 */ 126 */
127int rv770_pcie_gart_enable(struct radeon_device *rdev) 127static int rv770_pcie_gart_enable(struct radeon_device *rdev)
128{ 128{
129 u32 tmp; 129 u32 tmp;
130 int r, i; 130 int r, i;
@@ -175,7 +175,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
175 return 0; 175 return 0;
176} 176}
177 177
178void rv770_pcie_gart_disable(struct radeon_device *rdev) 178static void rv770_pcie_gart_disable(struct radeon_device *rdev)
179{ 179{
180 u32 tmp; 180 u32 tmp;
181 int i; 181 int i;
@@ -201,7 +201,7 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
201 radeon_gart_table_vram_unpin(rdev); 201 radeon_gart_table_vram_unpin(rdev);
202} 202}
203 203
204void rv770_pcie_gart_fini(struct radeon_device *rdev) 204static void rv770_pcie_gart_fini(struct radeon_device *rdev)
205{ 205{
206 radeon_gart_fini(rdev); 206 radeon_gart_fini(rdev);
207 rv770_pcie_gart_disable(rdev); 207 rv770_pcie_gart_disable(rdev);
@@ -209,7 +209,7 @@ void rv770_pcie_gart_fini(struct radeon_device *rdev)
209} 209}
210 210
211 211
212void rv770_agp_enable(struct radeon_device *rdev) 212static void rv770_agp_enable(struct radeon_device *rdev)
213{ 213{
214 u32 tmp; 214 u32 tmp;
215 int i; 215 int i;
@@ -839,7 +839,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
839 } 839 }
840} 840}
841 841
842int rv770_mc_init(struct radeon_device *rdev) 842static int rv770_mc_init(struct radeon_device *rdev)
843{ 843{
844 u32 tmp; 844 u32 tmp;
845 int chansize, numchan; 845 int chansize, numchan;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d883cae56378..f79633a036c3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1806,13 +1806,14 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1806#endif 1806#endif
1807 (ib->gpu_addr & 0xFFFFFFFC)); 1807 (ib->gpu_addr & 0xFFFFFFFC));
1808 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); 1808 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
1809 radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24)); 1809 radeon_ring_write(ring, ib->length_dw |
1810 (ib->vm ? (ib->vm->id << 24) : 0));
1810 1811
1811 if (!ib->is_const_ib) { 1812 if (!ib->is_const_ib) {
1812 /* flush read cache over gart for this vmid */ 1813 /* flush read cache over gart for this vmid */
1813 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1814 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1814 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); 1815 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
1815 radeon_ring_write(ring, ib->vm_id); 1816 radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
1816 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 1817 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
1817 radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA | 1818 radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
1818 PACKET3_TC_ACTION_ENA | 1819 PACKET3_TC_ACTION_ENA |
@@ -2363,7 +2364,7 @@ void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
2363 WREG32(VM_INVALIDATE_REQUEST, 1); 2364 WREG32(VM_INVALIDATE_REQUEST, 1);
2364} 2365}
2365 2366
2366int si_pcie_gart_enable(struct radeon_device *rdev) 2367static int si_pcie_gart_enable(struct radeon_device *rdev)
2367{ 2368{
2368 int r, i; 2369 int r, i;
2369 2370
@@ -2425,7 +2426,7 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
2425 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 2426 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
2426 (u32)(rdev->dummy_page.addr >> 12)); 2427 (u32)(rdev->dummy_page.addr >> 12));
2427 WREG32(VM_CONTEXT1_CNTL2, 0); 2428 WREG32(VM_CONTEXT1_CNTL2, 0);
2428 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 2429 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
2429 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 2430 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
2430 2431
2431 si_pcie_gart_tlb_flush(rdev); 2432 si_pcie_gart_tlb_flush(rdev);
@@ -2436,7 +2437,7 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
2436 return 0; 2437 return 0;
2437} 2438}
2438 2439
2439void si_pcie_gart_disable(struct radeon_device *rdev) 2440static void si_pcie_gart_disable(struct radeon_device *rdev)
2440{ 2441{
2441 /* Disable all tables */ 2442 /* Disable all tables */
2442 WREG32(VM_CONTEXT0_CNTL, 0); 2443 WREG32(VM_CONTEXT0_CNTL, 0);
@@ -2455,7 +2456,7 @@ void si_pcie_gart_disable(struct radeon_device *rdev)
2455 radeon_gart_table_vram_unpin(rdev); 2456 radeon_gart_table_vram_unpin(rdev);
2456} 2457}
2457 2458
2458void si_pcie_gart_fini(struct radeon_device *rdev) 2459static void si_pcie_gart_fini(struct radeon_device *rdev)
2459{ 2460{
2460 si_pcie_gart_disable(rdev); 2461 si_pcie_gart_disable(rdev);
2461 radeon_gart_table_vram_free(rdev); 2462 radeon_gart_table_vram_free(rdev);
@@ -2788,41 +2789,84 @@ void si_vm_fini(struct radeon_device *rdev)
2788{ 2789{
2789} 2790}
2790 2791
2791int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id) 2792/**
2793 * si_vm_set_page - update the page tables using the CP
2794 *
2795 * @rdev: radeon_device pointer
2796 * @pe: addr of the page entry
2797 * @addr: dst addr to write into pe
2798 * @count: number of page entries to update
2799 * @incr: increase next addr by incr bytes
2800 * @flags: access flags
2801 *
2802 * Update the page tables using the CP (cayman-si).
2803 */
2804void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
2805 uint64_t addr, unsigned count,
2806 uint32_t incr, uint32_t flags)
2792{ 2807{
2793 if (id < 8) 2808 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
2794 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12); 2809 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
2795 else 2810 int i;
2796 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((id - 8) << 2), 2811 uint64_t value;
2797 vm->pt_gpu_addr >> 12); 2812
2798 /* flush hdp cache */ 2813 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2));
2799 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 2814 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2800 /* bits 0-15 are the VM contexts0-15 */ 2815 WRITE_DATA_DST_SEL(1)));
2801 WREG32(VM_INVALIDATE_REQUEST, 1 << id); 2816 radeon_ring_write(ring, pe);
2802 return 0; 2817 radeon_ring_write(ring, upper_32_bits(pe));
2818 for (i = 0; i < count; ++i) {
2819 if (flags & RADEON_VM_PAGE_SYSTEM) {
2820 value = radeon_vm_map_gart(rdev, addr);
2821 value &= 0xFFFFFFFFFFFFF000ULL;
2822 } else if (flags & RADEON_VM_PAGE_VALID)
2823 value = addr;
2824 else
2825 value = 0;
2826 addr += incr;
2827 value |= r600_flags;
2828 radeon_ring_write(ring, value);
2829 radeon_ring_write(ring, upper_32_bits(value));
2830 }
2803} 2831}
2804 2832
2805void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm) 2833void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2806{ 2834{
2807 if (vm->id < 8) 2835 struct radeon_ring *ring = &rdev->ring[ridx];
2808 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
2809 else
2810 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2), 0);
2811 /* flush hdp cache */
2812 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
2813 /* bits 0-15 are the VM contexts0-15 */
2814 WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
2815}
2816 2836
2817void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm) 2837 if (vm == NULL)
2818{
2819 if (vm->id == -1)
2820 return; 2838 return;
2821 2839
2840 /* write new base address */
2841 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2842 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2843 WRITE_DATA_DST_SEL(0)));
2844
2845 if (vm->id < 8) {
2846 radeon_ring_write(ring,
2847 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
2848 } else {
2849 radeon_ring_write(ring,
2850 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
2851 }
2852 radeon_ring_write(ring, 0);
2853 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
2854
2822 /* flush hdp cache */ 2855 /* flush hdp cache */
2823 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 2856 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2857 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2858 WRITE_DATA_DST_SEL(0)));
2859 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
2860 radeon_ring_write(ring, 0);
2861 radeon_ring_write(ring, 0x1);
2862
2824 /* bits 0-15 are the VM contexts0-15 */ 2863 /* bits 0-15 are the VM contexts0-15 */
2825 WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id); 2864 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2865 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2866 WRITE_DATA_DST_SEL(0)));
2867 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
2868 radeon_ring_write(ring, 0);
2869 radeon_ring_write(ring, 1 << vm->id);
2826} 2870}
2827 2871
2828/* 2872/*
@@ -3199,10 +3243,6 @@ int si_irq_set(struct radeon_device *rdev)
3199 DRM_DEBUG("si_irq_set: hpd 6\n"); 3243 DRM_DEBUG("si_irq_set: hpd 6\n");
3200 hpd6 |= DC_HPDx_INT_EN; 3244 hpd6 |= DC_HPDx_INT_EN;
3201 } 3245 }
3202 if (rdev->irq.gui_idle) {
3203 DRM_DEBUG("gui idle\n");
3204 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3205 }
3206 3246
3207 WREG32(CP_INT_CNTL_RING0, cp_int_cntl); 3247 WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
3208 WREG32(CP_INT_CNTL_RING1, cp_int_cntl1); 3248 WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
@@ -3658,7 +3698,6 @@ restart_ih:
3658 break; 3698 break;
3659 case 233: /* GUI IDLE */ 3699 case 233: /* GUI IDLE */
3660 DRM_DEBUG("IH: GUI idle\n"); 3700 DRM_DEBUG("IH: GUI idle\n");
3661 wake_up(&rdev->irq.idle_queue);
3662 break; 3701 break;
3663 default: 3702 default:
3664 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3703 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
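
With the VM rework above, si_vm_flush() programs the page-directory base, flushes the HDP cache and invalidates the TLB by emitting CP packets on the ring rather than via WREG32(), so the flush is ordered with the command stream; PAGE_TABLE_DEPTH(1) in the GART setup pairs with this by pointing VM contexts 1-15 at a page directory (pd_gpu_addr) instead of a flat table. The recurring five-dword sequence is "write one register from the CP"; a hedged sketch of it as a helper (si_ring_wreg32 is a hypothetical name; the PACKET3 count of 3 is the number of dwords after the header minus one, matching the 2 + count * 2 arithmetic in si_vm_set_page()):

static void si_ring_wreg32(struct radeon_ring *ring, u32 reg, u32 val)
{
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, WRITE_DATA_ENGINE_SEL(0) |	/* micro engine */
				WRITE_DATA_DST_SEL(0));		/* register destination */
	radeon_ring_write(ring, reg >> 2);	/* register offset in dwords */
	radeon_ring_write(ring, 0);		/* upper address bits; unused for registers */
	radeon_ring_write(ring, val);		/* value to store */
}
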
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index ef4815c27b1c..7d2a20e56577 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -812,6 +812,21 @@
812#define PACKET3_DRAW_INDEX_OFFSET_2 0x35 812#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
813#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36 813#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
814#define PACKET3_WRITE_DATA 0x37 814#define PACKET3_WRITE_DATA 0x37
815#define WRITE_DATA_DST_SEL(x) ((x) << 8)
816 /* 0 - register
817 * 1 - memory (sync - via GRBM)
818 * 2 - tc/l2
819 * 3 - gds
820 * 4 - reserved
821 * 5 - memory (async - direct)
822 */
823#define WR_ONE_ADDR (1 << 16)
824#define WR_CONFIRM (1 << 20)
825#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
826 /* 0 - me
827 * 1 - pfp
828 * 2 - ce
829 */
815#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38 830#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
816#define PACKET3_MEM_SEMAPHORE 0x39 831#define PACKET3_MEM_SEMAPHORE 0x39
817#define PACKET3_MPEG_INDEX 0x3A 832#define PACKET3_MPEG_INDEX 0x3A
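
The new field macros make those WRITE_DATA headers self-describing. A worked control-dword example, computed straight from the shifts defined above (the DST_SEL(5) async-memory path is not used by the si.c hunks; it is purely illustrative):

u32 ctrl = WRITE_DATA_ENGINE_SEL(0)	/* 0 << 30: me engine */
	 | WRITE_DATA_DST_SEL(5)	/* 5 << 8:  memory (async - direct) */
	 | WR_ONE_ADDR			/* 1 << 16: presumably keep writing one address */
	 | WR_CONFIRM;			/* 1 << 20: wait for the write to land */
/* ctrl == 0x00110500 */
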
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index b88a42154e16..b55c1d661147 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -547,6 +547,8 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
547 547
548 dev_priv->chipset = (enum savage_family)chipset; 548 dev_priv->chipset = (enum savage_family)chipset;
549 549
550 pci_set_master(dev->pdev);
551
550 return 0; 552 return 0;
551} 553}
552 554
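
The savage fix is a single call: enable PCI bus mastering at load time so the chip can DMA (presumably for BCI command fetch) even when firmware left the Bus Master Enable bit clear. A sketch of the usual pattern in a PCI DRM load path:

/* in the load hook, once the PCI device itself is enabled */
pci_set_master(dev->pdev);	/* sets Bus Master Enable in PCI_COMMAND */
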
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
new file mode 100644
index 000000000000..7e7d52b2a2fc
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -0,0 +1,10 @@
1config DRM_SHMOBILE
2 tristate "DRM Support for SH Mobile"
3 depends on DRM && (SUPERH || ARCH_SHMOBILE)
4 select DRM_KMS_HELPER
5 select DRM_KMS_CMA_HELPER
6 select DRM_GEM_CMA_HELPER
7 help
8 Choose this option if you have an SH Mobile chipset.
 9 If M is selected, the module will be called shmob-drm.
10
diff --git a/drivers/gpu/drm/shmobile/Makefile b/drivers/gpu/drm/shmobile/Makefile
new file mode 100644
index 000000000000..4c3eeb355630
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/Makefile
@@ -0,0 +1,7 @@
1shmob-drm-y := shmob_drm_backlight.o \
2 shmob_drm_crtc.o \
3 shmob_drm_drv.o \
4 shmob_drm_kms.o \
5 shmob_drm_plane.o
6
7obj-$(CONFIG_DRM_SHMOBILE) += shmob-drm.o
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.c b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
new file mode 100644
index 000000000000..463aee18f774
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.c
@@ -0,0 +1,90 @@
1/*
2 * shmob_drm_backlight.c -- SH Mobile DRM Backlight
3 *
4 * Copyright (C) 2012 Renesas Corporation
5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/backlight.h>
15
16#include "shmob_drm_backlight.h"
17#include "shmob_drm_crtc.h"
18#include "shmob_drm_drv.h"
19
20static int shmob_drm_backlight_update(struct backlight_device *bdev)
21{
22 struct shmob_drm_connector *scon = bl_get_data(bdev);
23 struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
24 const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
25 int brightness = bdev->props.brightness;
26
27 if (bdev->props.power != FB_BLANK_UNBLANK ||
28 bdev->props.state & BL_CORE_SUSPENDED)
29 brightness = 0;
30
31 return bdata->set_brightness(brightness);
32}
33
34static int shmob_drm_backlight_get_brightness(struct backlight_device *bdev)
35{
36 struct shmob_drm_connector *scon = bl_get_data(bdev);
37 struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
38 const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
39
40 return bdata->get_brightness();
41}
42
43static const struct backlight_ops shmob_drm_backlight_ops = {
44 .options = BL_CORE_SUSPENDRESUME,
45 .update_status = shmob_drm_backlight_update,
46 .get_brightness = shmob_drm_backlight_get_brightness,
47};
48
49void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode)
50{
51 if (scon->backlight == NULL)
52 return;
53
54 scon->backlight->props.power = mode == DRM_MODE_DPMS_ON
55 ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
56 backlight_update_status(scon->backlight);
57}
58
59int shmob_drm_backlight_init(struct shmob_drm_connector *scon)
60{
61 struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
62 const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
63 struct drm_connector *connector = &scon->connector;
64 struct drm_device *dev = connector->dev;
65 struct backlight_device *backlight;
66
67 if (!bdata->max_brightness)
68 return 0;
69
70 backlight = backlight_device_register(bdata->name, dev->dev, scon,
71 &shmob_drm_backlight_ops, NULL);
72 if (IS_ERR(backlight)) {
73 dev_err(dev->dev, "unable to register backlight device: %ld\n",
74 PTR_ERR(backlight));
75 return PTR_ERR(backlight);
76 }
77
78 backlight->props.max_brightness = bdata->max_brightness;
79 backlight->props.brightness = bdata->max_brightness;
80 backlight->props.power = FB_BLANK_POWERDOWN;
81 backlight_update_status(backlight);
82
83 scon->backlight = backlight;
84 return 0;
85}
86
87void shmob_drm_backlight_exit(struct shmob_drm_connector *scon)
88{
89 backlight_device_unregister(scon->backlight);
90}
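
The backlight glue above is driven entirely by platform data: a zero max_brightness opts out, otherwise the driver registers a backlight device and forwards brightness through the board's callbacks. A hypothetical board-file sketch (all myboard_* names are invented; the field names follow the driver's usage):

static int myboard_bl_level;

static int myboard_bl_set(int brightness)
{
	myboard_bl_level = brightness;	/* program the board's PWM/GPIO here */
	return 0;
}

static int myboard_bl_get(void)
{
	return myboard_bl_level;
}

static struct shmob_drm_platform_data myboard_pdata = {
	.backlight = {
		.name		= "myboard-backlight",
		.max_brightness	= 255,	/* 0 would disable backlight support */
		.set_brightness	= myboard_bl_set,
		.get_brightness	= myboard_bl_get,
	},
	/* iface, panel, clk_source, ... filled in as the board requires */
};
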
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_backlight.h b/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
new file mode 100644
index 000000000000..9477595d2ff3
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_backlight.h
@@ -0,0 +1,23 @@
1/*
2 * shmob_drm_backlight.h -- SH Mobile DRM Backlight
3 *
4 * Copyright (C) 2012 Renesas Corporation
5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __SHMOB_DRM_BACKLIGHT_H__
15#define __SHMOB_DRM_BACKLIGHT_H__
16
17struct shmob_drm_connector;
18
19void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode);
20int shmob_drm_backlight_init(struct shmob_drm_connector *scon);
21void shmob_drm_backlight_exit(struct shmob_drm_connector *scon);
22
23#endif /* __SHMOB_DRM_BACKLIGHT_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
new file mode 100644
index 000000000000..0e7a9306bd0c
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -0,0 +1,763 @@
1/*
2 * shmob_drm_crtc.c -- SH Mobile DRM CRTCs
3 *
4 * Copyright (C) 2012 Renesas Corporation
5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/backlight.h>
15#include <linux/clk.h>
16
17#include <drm/drmP.h>
18#include <drm/drm_crtc.h>
19#include <drm/drm_crtc_helper.h>
20#include <drm/drm_fb_cma_helper.h>
21#include <drm/drm_gem_cma_helper.h>
22
23#include <video/sh_mobile_meram.h>
24
25#include "shmob_drm_backlight.h"
26#include "shmob_drm_crtc.h"
27#include "shmob_drm_drv.h"
28#include "shmob_drm_kms.h"
29#include "shmob_drm_plane.h"
30#include "shmob_drm_regs.h"
31
32/*
33 * TODO: panel support
34 */
35
36/* -----------------------------------------------------------------------------
37 * Clock management
38 */
39
40static void shmob_drm_clk_on(struct shmob_drm_device *sdev)
41{
42 if (sdev->clock)
43 clk_enable(sdev->clock);
44#if 0
45 if (sdev->meram_dev && sdev->meram_dev->pdev)
46 pm_runtime_get_sync(&sdev->meram_dev->pdev->dev);
47#endif
48}
49
50static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
51{
52#if 0
53 if (sdev->meram_dev && sdev->meram_dev->pdev)
54 pm_runtime_put_sync(&sdev->meram_dev->pdev->dev);
55#endif
56 if (sdev->clock)
57 clk_disable(sdev->clock);
58}
59
60/* -----------------------------------------------------------------------------
61 * CRTC
62 */
63
64static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc)
65{
66 struct drm_crtc *crtc = &scrtc->crtc;
67 struct shmob_drm_device *sdev = crtc->dev->dev_private;
68 const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
69 const struct drm_display_mode *mode = &crtc->mode;
70 u32 value;
71
72 value = sdev->ldmt1r
73 | ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : LDMT1R_VPOL)
74 | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : LDMT1R_HPOL)
75 | ((idata->flags & SHMOB_DRM_IFACE_FL_DWPOL) ? LDMT1R_DWPOL : 0)
76 | ((idata->flags & SHMOB_DRM_IFACE_FL_DIPOL) ? LDMT1R_DIPOL : 0)
77 | ((idata->flags & SHMOB_DRM_IFACE_FL_DAPOL) ? LDMT1R_DAPOL : 0)
78 | ((idata->flags & SHMOB_DRM_IFACE_FL_HSCNT) ? LDMT1R_HSCNT : 0)
79 | ((idata->flags & SHMOB_DRM_IFACE_FL_DWCNT) ? LDMT1R_DWCNT : 0);
80 lcdc_write(sdev, LDMT1R, value);
81
82 if (idata->interface >= SHMOB_DRM_IFACE_SYS8A &&
83 idata->interface <= SHMOB_DRM_IFACE_SYS24) {
84 /* Setup SYS bus. */
85 value = (idata->sys.cs_setup << LDMT2R_CSUP_SHIFT)
86 | (idata->sys.vsync_active_high ? LDMT2R_RSV : 0)
87 | (idata->sys.vsync_dir_input ? LDMT2R_VSEL : 0)
88 | (idata->sys.write_setup << LDMT2R_WCSC_SHIFT)
89 | (idata->sys.write_cycle << LDMT2R_WCEC_SHIFT)
90 | (idata->sys.write_strobe << LDMT2R_WCLW_SHIFT);
91 lcdc_write(sdev, LDMT2R, value);
92
93 value = (idata->sys.read_latch << LDMT3R_RDLC_SHIFT)
94 | (idata->sys.read_setup << LDMT3R_RCSC_SHIFT)
95 | (idata->sys.read_cycle << LDMT3R_RCEC_SHIFT)
96 | (idata->sys.read_strobe << LDMT3R_RCLW_SHIFT);
97 lcdc_write(sdev, LDMT3R, value);
98 }
99
100 value = ((mode->hdisplay / 8) << 16) /* HDCN */
101 | (mode->htotal / 8); /* HTCN */
102 lcdc_write(sdev, LDHCNR, value);
103
104 value = (((mode->hsync_end - mode->hsync_start) / 8) << 16) /* HSYNW */
105 | (mode->hsync_start / 8); /* HSYNP */
106 lcdc_write(sdev, LDHSYNR, value);
107
108 value = ((mode->hdisplay & 7) << 24) | ((mode->htotal & 7) << 16)
109 | (((mode->hsync_end - mode->hsync_start) & 7) << 8)
110 | (mode->hsync_start & 7);
111 lcdc_write(sdev, LDHAJR, value);
112
113 value = ((mode->vdisplay) << 16) /* VDLN */
114 | mode->vtotal; /* VTLN */
115 lcdc_write(sdev, LDVLNR, value);
116
117 value = ((mode->vsync_end - mode->vsync_start) << 16) /* VSYNW */
118 | mode->vsync_start; /* VSYNP */
119 lcdc_write(sdev, LDVSYNR, value);
120}
121
122static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start)
123{
124 struct shmob_drm_device *sdev = scrtc->crtc.dev->dev_private;
125 u32 value;
126
127 value = lcdc_read(sdev, LDCNT2R);
128 if (start)
129 lcdc_write(sdev, LDCNT2R, value | LDCNT2R_DO);
130 else
131 lcdc_write(sdev, LDCNT2R, value & ~LDCNT2R_DO);
132
133 /* Wait until power is applied/stopped. */
134 while (1) {
135 value = lcdc_read(sdev, LDPMR) & LDPMR_LPS;
136 if ((start && value) || (!start && !value))
137 break;
138
139 cpu_relax();
140 }
141
142 if (!start) {
143 /* Stop the dot clock. */
144 lcdc_write(sdev, LDDCKSTPR, LDDCKSTPR_DCKSTP);
145 }
146}
147
148/*
149 * shmob_drm_crtc_start - Configure and start the LCDC
150 * @scrtc: the SH Mobile CRTC
151 *
152 * Configure and start the LCDC device. External devices (clocks, MERAM, panels,
153 * ...) are not touched by this function.
154 */
155static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
156{
157 struct drm_crtc *crtc = &scrtc->crtc;
158 struct shmob_drm_device *sdev = crtc->dev->dev_private;
159 const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
160 const struct shmob_drm_format_info *format;
161 struct drm_device *dev = sdev->ddev;
162 struct drm_plane *plane;
163 u32 value;
164
165 if (scrtc->started)
166 return;
167
168 format = shmob_drm_format_info(crtc->fb->pixel_format);
169 if (WARN_ON(format == NULL))
170 return;
171
172 /* Enable clocks before accessing the hardware. */
173 shmob_drm_clk_on(sdev);
174
175 /* Reset and enable the LCDC. */
176 lcdc_write(sdev, LDCNT2R, lcdc_read(sdev, LDCNT2R) | LDCNT2R_BR);
177 lcdc_wait_bit(sdev, LDCNT2R, LDCNT2R_BR, 0);
178 lcdc_write(sdev, LDCNT2R, LDCNT2R_ME);
179
180 /* Stop the LCDC first and disable all interrupts. */
181 shmob_drm_crtc_start_stop(scrtc, false);
182 lcdc_write(sdev, LDINTR, 0);
183
184 /* Configure power supply, dot clocks and start them. */
185 lcdc_write(sdev, LDPMR, 0);
186
187 value = sdev->lddckr;
188 if (idata->clk_div) {
189 /* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider
190 * denominator.
191 */
192 lcdc_write(sdev, LDDCKPAT1R, 0);
193 lcdc_write(sdev, LDDCKPAT2R, (1 << (idata->clk_div / 2)) - 1);
194
195 if (idata->clk_div == 1)
196 value |= LDDCKR_MOSEL;
197 else
198 value |= idata->clk_div;
199 }
200
201 lcdc_write(sdev, LDDCKR, value);
202 lcdc_write(sdev, LDDCKSTPR, 0);
203 lcdc_wait_bit(sdev, LDDCKSTPR, ~0, 0);
204
205 /* TODO: Setup SYS panel */
206
207 /* Setup geometry, format, frame buffer memory and operation mode. */
208 shmob_drm_crtc_setup_geometry(scrtc);
209
210 /* TODO: Handle YUV colorspaces. Hardcode REC709 for now. */
211 lcdc_write(sdev, LDDFR, format->lddfr | LDDFR_CF1);
212 lcdc_write(sdev, LDMLSR, scrtc->line_size);
213 lcdc_write(sdev, LDSA1R, scrtc->dma[0]);
214 if (format->yuv)
215 lcdc_write(sdev, LDSA2R, scrtc->dma[1]);
216 lcdc_write(sdev, LDSM1R, 0);
217
218 /* Word and long word swap. */
219 switch (format->fourcc) {
220 case DRM_FORMAT_RGB565:
221 case DRM_FORMAT_NV21:
222 case DRM_FORMAT_NV61:
223 case DRM_FORMAT_NV42:
224 value = LDDDSR_LS | LDDDSR_WS;
225 break;
226 case DRM_FORMAT_RGB888:
227 case DRM_FORMAT_NV12:
228 case DRM_FORMAT_NV16:
229 case DRM_FORMAT_NV24:
230 value = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
231 break;
232 case DRM_FORMAT_ARGB8888:
233 default:
234 value = LDDDSR_LS;
235 break;
236 }
237 lcdc_write(sdev, LDDDSR, value);
238
239 /* Setup planes. */
240 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
241 if (plane->crtc == crtc)
242 shmob_drm_plane_setup(plane);
243 }
244
245 /* Enable the display output. */
246 lcdc_write(sdev, LDCNT1R, LDCNT1R_DE);
247
248 shmob_drm_crtc_start_stop(scrtc, true);
249
250 scrtc->started = true;
251}
252
253static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc)
254{
255 struct drm_crtc *crtc = &scrtc->crtc;
256 struct shmob_drm_device *sdev = crtc->dev->dev_private;
257
258 if (!scrtc->started)
259 return;
260
261 /* Disable the MERAM cache. */
262 if (scrtc->cache) {
263 sh_mobile_meram_cache_free(sdev->meram, scrtc->cache);
264 scrtc->cache = NULL;
265 }
266
267 /* Stop the LCDC. */
268 shmob_drm_crtc_start_stop(scrtc, false);
269
270 /* Disable the display output. */
271 lcdc_write(sdev, LDCNT1R, 0);
272
273 /* Stop clocks. */
274 shmob_drm_clk_off(sdev);
275
276 scrtc->started = false;
277}
278
279void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc)
280{
281 shmob_drm_crtc_stop(scrtc);
282}
283
284void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc)
285{
286 if (scrtc->dpms != DRM_MODE_DPMS_ON)
287 return;
288
289 shmob_drm_crtc_start(scrtc);
290}
291
292static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
293 int x, int y)
294{
295 struct drm_crtc *crtc = &scrtc->crtc;
296 struct drm_framebuffer *fb = crtc->fb;
297 struct shmob_drm_device *sdev = crtc->dev->dev_private;
298 struct drm_gem_cma_object *gem;
299 unsigned int bpp;
300
301 bpp = scrtc->format->yuv ? 8 : scrtc->format->bpp;
302 gem = drm_fb_cma_get_gem_obj(fb, 0);
303 scrtc->dma[0] = gem->paddr + fb->offsets[0]
304 + y * fb->pitches[0] + x * bpp / 8;
305
306 if (scrtc->format->yuv) {
307 bpp = scrtc->format->bpp - 8;
308 gem = drm_fb_cma_get_gem_obj(fb, 1);
309 scrtc->dma[1] = gem->paddr + fb->offsets[1]
310 + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
311 + x * (bpp == 16 ? 2 : 1);
312 }
313
314 if (scrtc->cache)
315 sh_mobile_meram_cache_update(sdev->meram, scrtc->cache,
316 scrtc->dma[0], scrtc->dma[1],
317 &scrtc->dma[0], &scrtc->dma[1]);
318}
319
320static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc)
321{
322 struct drm_crtc *crtc = &scrtc->crtc;
323 struct shmob_drm_device *sdev = crtc->dev->dev_private;
324
325 shmob_drm_crtc_compute_base(scrtc, crtc->x, crtc->y);
326
327 lcdc_write_mirror(sdev, LDSA1R, scrtc->dma[0]);
328 if (scrtc->format->yuv)
329 lcdc_write_mirror(sdev, LDSA2R, scrtc->dma[1]);
330
331 lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS);
332}
333
334#define to_shmob_crtc(c) container_of(c, struct shmob_drm_crtc, crtc)
335
336static void shmob_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
337{
338 struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
339
340 if (scrtc->dpms == mode)
341 return;
342
343 if (mode == DRM_MODE_DPMS_ON)
344 shmob_drm_crtc_start(scrtc);
345 else
346 shmob_drm_crtc_stop(scrtc);
347
348 scrtc->dpms = mode;
349}
350
351static bool shmob_drm_crtc_mode_fixup(struct drm_crtc *crtc,
352 const struct drm_display_mode *mode,
353 struct drm_display_mode *adjusted_mode)
354{
355 return true;
356}
357
358static void shmob_drm_crtc_mode_prepare(struct drm_crtc *crtc)
359{
360 shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
361}
362
363static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
364 struct drm_display_mode *mode,
365 struct drm_display_mode *adjusted_mode,
366 int x, int y,
367 struct drm_framebuffer *old_fb)
368{
369 struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
370 struct shmob_drm_device *sdev = crtc->dev->dev_private;
371 const struct sh_mobile_meram_cfg *mdata = sdev->pdata->meram;
372 const struct shmob_drm_format_info *format;
373 void *cache;
374
375 format = shmob_drm_format_info(crtc->fb->pixel_format);
376 if (format == NULL) {
377 dev_dbg(sdev->dev, "mode_set: unsupported format %08x\n",
378 crtc->fb->pixel_format);
379 return -EINVAL;
380 }
381
382 scrtc->format = format;
383 scrtc->line_size = crtc->fb->pitches[0];
384
385 if (sdev->meram) {
386 /* Enable MERAM cache if configured. We need to de-init
387 * configured ICBs before we can re-initialize them.
388 */
389 if (scrtc->cache) {
390 sh_mobile_meram_cache_free(sdev->meram, scrtc->cache);
391 scrtc->cache = NULL;
392 }
393
394 cache = sh_mobile_meram_cache_alloc(sdev->meram, mdata,
395 crtc->fb->pitches[0],
396 adjusted_mode->vdisplay,
397 format->meram,
398 &scrtc->line_size);
399 if (!IS_ERR(cache))
400 scrtc->cache = cache;
401 }
402
403 shmob_drm_crtc_compute_base(scrtc, x, y);
404
405 return 0;
406}
407
408static void shmob_drm_crtc_mode_commit(struct drm_crtc *crtc)
409{
410 shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
411}
412
413static int shmob_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
414 struct drm_framebuffer *old_fb)
415{
416 shmob_drm_crtc_update_base(to_shmob_crtc(crtc));
417
418 return 0;
419}
420
421static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
422 .dpms = shmob_drm_crtc_dpms,
423 .mode_fixup = shmob_drm_crtc_mode_fixup,
424 .prepare = shmob_drm_crtc_mode_prepare,
425 .commit = shmob_drm_crtc_mode_commit,
426 .mode_set = shmob_drm_crtc_mode_set,
427 .mode_set_base = shmob_drm_crtc_mode_set_base,
428};
429
430void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
431 struct drm_file *file)
432{
433 struct drm_pending_vblank_event *event;
434 struct drm_device *dev = scrtc->crtc.dev;
435 unsigned long flags;
436
437 /* Destroy the pending vertical blanking event associated with the
438 * pending page flip, if any, and disable vertical blanking interrupts.
439 */
440 spin_lock_irqsave(&dev->event_lock, flags);
441 event = scrtc->event;
442 if (event && event->base.file_priv == file) {
443 scrtc->event = NULL;
444 event->base.destroy(&event->base);
445 drm_vblank_put(dev, 0);
446 }
447 spin_unlock_irqrestore(&dev->event_lock, flags);
448}
449
450void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
451{
452 struct drm_pending_vblank_event *event;
453 struct drm_device *dev = scrtc->crtc.dev;
454 struct timeval vblanktime;
455 unsigned long flags;
456
457 spin_lock_irqsave(&dev->event_lock, flags);
458 event = scrtc->event;
459 scrtc->event = NULL;
460 spin_unlock_irqrestore(&dev->event_lock, flags);
461
462 if (event == NULL)
463 return;
464
465 event->event.sequence = drm_vblank_count_and_time(dev, 0, &vblanktime);
466 event->event.tv_sec = vblanktime.tv_sec;
467 event->event.tv_usec = vblanktime.tv_usec;
468
469 spin_lock_irqsave(&dev->event_lock, flags);
470 list_add_tail(&event->base.link, &event->base.file_priv->event_list);
471 wake_up_interruptible(&event->base.file_priv->event_wait);
472 spin_unlock_irqrestore(&dev->event_lock, flags);
473
474 drm_vblank_put(dev, 0);
475}
476
477static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
478 struct drm_framebuffer *fb,
479 struct drm_pending_vblank_event *event)
480{
481 struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
482 struct drm_device *dev = scrtc->crtc.dev;
483 unsigned long flags;
484
485 spin_lock_irqsave(&dev->event_lock, flags);
486 if (scrtc->event != NULL) {
487 spin_unlock_irqrestore(&dev->event_lock, flags);
488 return -EBUSY;
489 }
490 spin_unlock_irqrestore(&dev->event_lock, flags);
491
492 crtc->fb = fb;
493 shmob_drm_crtc_update_base(scrtc);
494
495 if (event) {
496 event->pipe = 0;
497 spin_lock_irqsave(&dev->event_lock, flags);
498 scrtc->event = event;
499 spin_unlock_irqrestore(&dev->event_lock, flags);
500 drm_vblank_get(dev, 0);
501 }
502
503 return 0;
504}
505
506static const struct drm_crtc_funcs crtc_funcs = {
507 .destroy = drm_crtc_cleanup,
508 .set_config = drm_crtc_helper_set_config,
509 .page_flip = shmob_drm_crtc_page_flip,
510};
511
512int shmob_drm_crtc_create(struct shmob_drm_device *sdev)
513{
514 struct drm_crtc *crtc = &sdev->crtc.crtc;
515 int ret;
516
517 sdev->crtc.dpms = DRM_MODE_DPMS_OFF;
518
519 ret = drm_crtc_init(sdev->ddev, crtc, &crtc_funcs);
520 if (ret < 0)
521 return ret;
522
523 drm_crtc_helper_add(crtc, &crtc_helper_funcs);
524
525 return 0;
526}
527
528/* -----------------------------------------------------------------------------
529 * Encoder
530 */
531
532#define to_shmob_encoder(e) \
533 container_of(e, struct shmob_drm_encoder, encoder)
534
535static void shmob_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
536{
537 struct shmob_drm_encoder *senc = to_shmob_encoder(encoder);
538 struct shmob_drm_device *sdev = encoder->dev->dev_private;
539 struct shmob_drm_connector *scon = &sdev->connector;
540
541 if (senc->dpms == mode)
542 return;
543
544 shmob_drm_backlight_dpms(scon, mode);
545
546 senc->dpms = mode;
547}
548
549static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder,
550 const struct drm_display_mode *mode,
551 struct drm_display_mode *adjusted_mode)
552{
553 struct drm_device *dev = encoder->dev;
554 struct shmob_drm_device *sdev = dev->dev_private;
555 struct drm_connector *connector = &sdev->connector.connector;
556 const struct drm_display_mode *panel_mode;
557
558 if (list_empty(&connector->modes)) {
559 dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
560 return false;
561 }
562
563 /* The flat panel mode is fixed, just copy it to the adjusted mode. */
564 panel_mode = list_first_entry(&connector->modes,
565 struct drm_display_mode, head);
566 drm_mode_copy(adjusted_mode, panel_mode);
567
568 return true;
569}
570
571static void shmob_drm_encoder_mode_prepare(struct drm_encoder *encoder)
572{
573 /* No-op, everything is handled in the CRTC code. */
574}
575
576static void shmob_drm_encoder_mode_set(struct drm_encoder *encoder,
577 struct drm_display_mode *mode,
578 struct drm_display_mode *adjusted_mode)
579{
580 /* No-op, everything is handled in the CRTC code. */
581}
582
583static void shmob_drm_encoder_mode_commit(struct drm_encoder *encoder)
584{
585 /* No-op, everything is handled in the CRTC code. */
586}
587
588static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
589 .dpms = shmob_drm_encoder_dpms,
590 .mode_fixup = shmob_drm_encoder_mode_fixup,
591 .prepare = shmob_drm_encoder_mode_prepare,
592 .commit = shmob_drm_encoder_mode_commit,
593 .mode_set = shmob_drm_encoder_mode_set,
594};
595
596static void shmob_drm_encoder_destroy(struct drm_encoder *encoder)
597{
598 drm_encoder_cleanup(encoder);
599}
600
601static const struct drm_encoder_funcs encoder_funcs = {
602 .destroy = shmob_drm_encoder_destroy,
603};
604
605int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
606{
607 struct drm_encoder *encoder = &sdev->encoder.encoder;
608 int ret;
609
610 sdev->encoder.dpms = DRM_MODE_DPMS_OFF;
611
612 encoder->possible_crtcs = 1;
613
614 ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs,
615 DRM_MODE_ENCODER_LVDS);
616 if (ret < 0)
617 return ret;
618
619 drm_encoder_helper_add(encoder, &encoder_helper_funcs);
620
621 return 0;
622}
623
624void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable)
625{
626 unsigned long flags;
627 u32 ldintr;
628
629 /* Be careful not to acknowledge any pending interrupt. */
630 spin_lock_irqsave(&sdev->irq_lock, flags);
631 ldintr = lcdc_read(sdev, LDINTR) | LDINTR_STATUS_MASK;
632 if (enable)
633 ldintr |= LDINTR_VEE;
634 else
635 ldintr &= ~LDINTR_VEE;
636 lcdc_write(sdev, LDINTR, ldintr);
637 spin_unlock_irqrestore(&sdev->irq_lock, flags);
638}
639
640/* -----------------------------------------------------------------------------
641 * Connector
642 */
643
644#define to_shmob_connector(c) \
645 container_of(c, struct shmob_drm_connector, connector)
646
647static int shmob_drm_connector_get_modes(struct drm_connector *connector)
648{
649 struct shmob_drm_device *sdev = connector->dev->dev_private;
650 struct drm_display_mode *mode;
651
652 mode = drm_mode_create(connector->dev);
653 if (mode == NULL)
654 return 0;
655
656 mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
657 mode->clock = sdev->pdata->panel.mode.clock;
658 mode->hdisplay = sdev->pdata->panel.mode.hdisplay;
659 mode->hsync_start = sdev->pdata->panel.mode.hsync_start;
660 mode->hsync_end = sdev->pdata->panel.mode.hsync_end;
661 mode->htotal = sdev->pdata->panel.mode.htotal;
662 mode->vdisplay = sdev->pdata->panel.mode.vdisplay;
663 mode->vsync_start = sdev->pdata->panel.mode.vsync_start;
664 mode->vsync_end = sdev->pdata->panel.mode.vsync_end;
665 mode->vtotal = sdev->pdata->panel.mode.vtotal;
666 mode->flags = sdev->pdata->panel.mode.flags;
667
668 drm_mode_set_name(mode);
669 drm_mode_probed_add(connector, mode);
670
671 connector->display_info.width_mm = sdev->pdata->panel.width_mm;
672 connector->display_info.height_mm = sdev->pdata->panel.height_mm;
673
674 return 1;
675}
676
677static int shmob_drm_connector_mode_valid(struct drm_connector *connector,
678 struct drm_display_mode *mode)
679{
680 return MODE_OK;
681}
682
683static struct drm_encoder *
684shmob_drm_connector_best_encoder(struct drm_connector *connector)
685{
686 struct shmob_drm_connector *scon = to_shmob_connector(connector);
687
688 return scon->encoder;
689}
690
691static const struct drm_connector_helper_funcs connector_helper_funcs = {
692 .get_modes = shmob_drm_connector_get_modes,
693 .mode_valid = shmob_drm_connector_mode_valid,
694 .best_encoder = shmob_drm_connector_best_encoder,
695};
696
697static void shmob_drm_connector_destroy(struct drm_connector *connector)
698{
699 struct shmob_drm_connector *scon = to_shmob_connector(connector);
700
701 shmob_drm_backlight_exit(scon);
702 drm_sysfs_connector_remove(connector);
703 drm_connector_cleanup(connector);
704}
705
706static enum drm_connector_status
707shmob_drm_connector_detect(struct drm_connector *connector, bool force)
708{
709 return connector_status_connected;
710}
711
712static const struct drm_connector_funcs connector_funcs = {
713 .dpms = drm_helper_connector_dpms,
714 .detect = shmob_drm_connector_detect,
715 .fill_modes = drm_helper_probe_single_connector_modes,
716 .destroy = shmob_drm_connector_destroy,
717};
718
719int shmob_drm_connector_create(struct shmob_drm_device *sdev,
720 struct drm_encoder *encoder)
721{
722 struct drm_connector *connector = &sdev->connector.connector;
723 int ret;
724
725 sdev->connector.encoder = encoder;
726
727 connector->display_info.width_mm = sdev->pdata->panel.width_mm;
728 connector->display_info.height_mm = sdev->pdata->panel.height_mm;
729
730 ret = drm_connector_init(sdev->ddev, connector, &connector_funcs,
731 DRM_MODE_CONNECTOR_LVDS);
732 if (ret < 0)
733 return ret;
734
735 drm_connector_helper_add(connector, &connector_helper_funcs);
736 ret = drm_sysfs_connector_add(connector);
737 if (ret < 0)
738 goto err_cleanup;
739
740 ret = shmob_drm_backlight_init(&sdev->connector);
741 if (ret < 0)
742 goto err_sysfs;
743
744 ret = drm_mode_connector_attach_encoder(connector, encoder);
745 if (ret < 0)
746 goto err_backlight;
747
748 connector->encoder = encoder;
749
750 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
751 drm_connector_property_set_value(connector,
752 sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
753
754 return 0;
755
756err_backlight:
757 shmob_drm_backlight_exit(&sdev->connector);
758err_sysfs:
759 drm_sysfs_connector_remove(connector);
760err_cleanup:
761 drm_connector_cleanup(connector);
762 return ret;
763}
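
shmob_drm_crtc_setup_geometry() packs two fields per register in units of 8 pixels, with LDHAJR holding the sub-8-pixel remainders. A worked example for a hypothetical 800x480 panel timing (htotal 1024, hsync at 840-856), computed with the same expressions as the function above:

struct drm_display_mode m = {
	.hdisplay = 800, .htotal = 1024,
	.hsync_start = 840, .hsync_end = 856,
};
u32 ldhcnr  = ((m.hdisplay / 8) << 16) | (m.htotal / 8);	/* 0x00640080 */
u32 ldhsynr = (((m.hsync_end - m.hsync_start) / 8) << 16)
	    | (m.hsync_start / 8);				/* 0x00020069 */
/* every timing value here is a multiple of 8, so LDHAJR would be 0 */
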
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
new file mode 100644
index 000000000000..e5bd109c4c38
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
@@ -0,0 +1,60 @@
1/*
2 * shmob_drm_crtc.h -- SH Mobile DRM CRTCs
3 *
4 * Copyright (C) 2012 Renesas Corporation
5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __SHMOB_DRM_CRTC_H__
15#define __SHMOB_DRM_CRTC_H__
16
17#include <drm/drmP.h>
18#include <drm/drm_crtc.h>
19
20struct backlight_device;
21struct shmob_drm_device;
22
23struct shmob_drm_crtc {
24 struct drm_crtc crtc;
25
26 struct drm_pending_vblank_event *event;
27 int dpms;
28
29 const struct shmob_drm_format_info *format;
30 void *cache;
31 unsigned long dma[2];
32 unsigned int line_size;
33 bool started;
34};
35
36struct shmob_drm_encoder {
37 struct drm_encoder encoder;
38 int dpms;
39};
40
41struct shmob_drm_connector {
42 struct drm_connector connector;
43 struct drm_encoder *encoder;
44
45 struct backlight_device *backlight;
46};
47
48int shmob_drm_crtc_create(struct shmob_drm_device *sdev);
49void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable);
50void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
51 struct drm_file *file);
52void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc);
53void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc);
54void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc);
55
56int shmob_drm_encoder_create(struct shmob_drm_device *sdev);
57int shmob_drm_connector_create(struct shmob_drm_device *sdev,
58 struct drm_encoder *encoder);
59
60#endif /* __SHMOB_DRM_CRTC_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
new file mode 100644
index 000000000000..c71d493fd0c5
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -0,0 +1,361 @@
1/*
2 * shmob_drm_drv.c -- SH Mobile DRM driver
3 *
4 * Copyright (C) 2012 Renesas Corporation
5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/clk.h>
15#include <linux/io.h>
16#include <linux/mm.h>
17#include <linux/module.h>
18#include <linux/platform_device.h>
19#include <linux/pm.h>
20#include <linux/slab.h>
21
22#include <drm/drmP.h>
23#include <drm/drm_crtc_helper.h>
24#include <drm/drm_gem_cma_helper.h>
25
26#include "shmob_drm_crtc.h"
27#include "shmob_drm_drv.h"
28#include "shmob_drm_kms.h"
29#include "shmob_drm_plane.h"
30#include "shmob_drm_regs.h"
31
32/* -----------------------------------------------------------------------------
33 * Hardware initialization
34 */
35
36static int __devinit shmob_drm_init_interface(struct shmob_drm_device *sdev)
37{
38 static const u32 ldmt1r[] = {
39 [SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8,
40 [SHMOB_DRM_IFACE_RGB9] = LDMT1R_MIFTYP_RGB9,
41 [SHMOB_DRM_IFACE_RGB12A] = LDMT1R_MIFTYP_RGB12A,
42 [SHMOB_DRM_IFACE_RGB12B] = LDMT1R_MIFTYP_RGB12B,
43 [SHMOB_DRM_IFACE_RGB16] = LDMT1R_MIFTYP_RGB16,
44 [SHMOB_DRM_IFACE_RGB18] = LDMT1R_MIFTYP_RGB18,
45 [SHMOB_DRM_IFACE_RGB24] = LDMT1R_MIFTYP_RGB24,
46 [SHMOB_DRM_IFACE_YUV422] = LDMT1R_MIFTYP_YCBCR,
47 [SHMOB_DRM_IFACE_SYS8A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8A,
48 [SHMOB_DRM_IFACE_SYS8B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8B,
49 [SHMOB_DRM_IFACE_SYS8C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8C,
50 [SHMOB_DRM_IFACE_SYS8D] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8D,
51 [SHMOB_DRM_IFACE_SYS9] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS9,
52 [SHMOB_DRM_IFACE_SYS12] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS12,
53 [SHMOB_DRM_IFACE_SYS16A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16A,
54 [SHMOB_DRM_IFACE_SYS16B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16B,
55 [SHMOB_DRM_IFACE_SYS16C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16C,
56 [SHMOB_DRM_IFACE_SYS18] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS18,
57 [SHMOB_DRM_IFACE_SYS24] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS24,
58 };
59
60 if (sdev->pdata->iface.interface >= ARRAY_SIZE(ldmt1r)) {
61 dev_err(sdev->dev, "invalid interface type %u\n",
62 sdev->pdata->iface.interface);
63 return -EINVAL;
64 }
65
66 sdev->ldmt1r = ldmt1r[sdev->pdata->iface.interface];
67 return 0;
68}
69
70static int __devinit shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
71 enum shmob_drm_clk_source clksrc)
72{
73 struct clk *clk;
74 char *clkname;
75
76 switch (clksrc) {
77 case SHMOB_DRM_CLK_BUS:
78 clkname = "bus_clk";
79 sdev->lddckr = LDDCKR_ICKSEL_BUS;
80 break;
81 case SHMOB_DRM_CLK_PERIPHERAL:
82 clkname = "peripheral_clk";
83 sdev->lddckr = LDDCKR_ICKSEL_MIPI;
84 break;
85 case SHMOB_DRM_CLK_EXTERNAL:
86 clkname = NULL;
87 sdev->lddckr = LDDCKR_ICKSEL_HDMI;
88 break;
89 default:
90 return -EINVAL;
91 }
92
93 clk = clk_get(sdev->dev, clkname);
94 if (IS_ERR(clk)) {
95 dev_err(sdev->dev, "cannot get dot clock %s\n", clkname);
96 return PTR_ERR(clk);
97 }
98
99 sdev->clock = clk;
100 return 0;
101}
102
103/* -----------------------------------------------------------------------------
104 * DRM operations
105 */
106
107static int shmob_drm_unload(struct drm_device *dev)
108{
109 struct shmob_drm_device *sdev = dev->dev_private;
110
111 drm_kms_helper_poll_fini(dev);
112 drm_mode_config_cleanup(dev);
113 drm_vblank_cleanup(dev);
114 drm_irq_uninstall(dev);
115
116 if (sdev->clock)
117 clk_put(sdev->clock);
118
119 if (sdev->mmio)
120 iounmap(sdev->mmio);
121
122 dev->dev_private = NULL;
123 kfree(sdev);
124
125 return 0;
126}
127
128static int shmob_drm_load(struct drm_device *dev, unsigned long flags)
129{
130 struct shmob_drm_platform_data *pdata = dev->dev->platform_data;
131 struct platform_device *pdev = dev->platformdev;
132 struct shmob_drm_device *sdev;
133 struct resource *res;
134 unsigned int i;
135 int ret;
136
137 if (pdata == NULL) {
138 dev_err(dev->dev, "no platform data\n");
139 return -EINVAL;
140 }
141
142 sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
143 if (sdev == NULL) {
144 dev_err(dev->dev, "failed to allocate private data\n");
145 return -ENOMEM;
146 }
147
148 sdev->dev = &pdev->dev;
149 sdev->pdata = pdata;
150 spin_lock_init(&sdev->irq_lock);
151
152 sdev->ddev = dev;
153 dev->dev_private = sdev;
154
155 /* I/O resources and clocks */
156 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
157 if (res == NULL) {
158 dev_err(&pdev->dev, "failed to get memory resource\n");
159 ret = -EINVAL;
160 goto done;
161 }
162
163 sdev->mmio = ioremap_nocache(res->start, resource_size(res));
164 if (sdev->mmio == NULL) {
165 dev_err(&pdev->dev, "failed to remap memory resource\n");
166 ret = -ENOMEM;
167 goto done;
168 }
169
170 ret = shmob_drm_setup_clocks(sdev, pdata->clk_source);
171 if (ret < 0)
172 goto done;
173
174 ret = shmob_drm_init_interface(sdev);
175 if (ret < 0)
176 goto done;
177
178 ret = shmob_drm_modeset_init(sdev);
179 if (ret < 0) {
180 dev_err(&pdev->dev, "failed to initialize mode setting\n");
181 goto done;
182 }
183
184 for (i = 0; i < 4; ++i) {
185 ret = shmob_drm_plane_create(sdev, i);
186 if (ret < 0) {
187 dev_err(&pdev->dev, "failed to create plane %u\n", i);
188 goto done;
189 }
190 }
191
192 ret = drm_vblank_init(dev, 1);
193 if (ret < 0) {
194 dev_err(&pdev->dev, "failed to initialize vblank\n");
195 goto done;
196 }
197
198 ret = drm_irq_install(dev);
199 if (ret < 0) {
200 dev_err(&pdev->dev, "failed to install IRQ handler\n");
201 goto done;
202 }
203
204done:
205 if (ret)
206 shmob_drm_unload(dev);
207
208 return ret;
209}
210
211static void shmob_drm_preclose(struct drm_device *dev, struct drm_file *file)
212{
213 struct shmob_drm_device *sdev = dev->dev_private;
214
215 shmob_drm_crtc_cancel_page_flip(&sdev->crtc, file);
216}
217
218static irqreturn_t shmob_drm_irq(int irq, void *arg)
219{
220 struct drm_device *dev = arg;
221 struct shmob_drm_device *sdev = dev->dev_private;
222 unsigned long flags;
223 u32 status;
224
225 /* Acknowledge interrupts. Putting interrupt enable and interrupt flag
226 * bits in the same register is really brain-dead design and requires
227 * taking a spinlock.
228 */
229 spin_lock_irqsave(&sdev->irq_lock, flags);
230 status = lcdc_read(sdev, LDINTR);
231 lcdc_write(sdev, LDINTR, status ^ LDINTR_STATUS_MASK);
232 spin_unlock_irqrestore(&sdev->irq_lock, flags);
233
234 if (status & LDINTR_VES) {
235 drm_handle_vblank(dev, 0);
236 shmob_drm_crtc_finish_page_flip(&sdev->crtc);
237 }
238
239 return IRQ_HANDLED;
240}
241
242static int shmob_drm_enable_vblank(struct drm_device *dev, int crtc)
243{
244 struct shmob_drm_device *sdev = dev->dev_private;
245
246 shmob_drm_crtc_enable_vblank(sdev, true);
247
248 return 0;
249}
250
251static void shmob_drm_disable_vblank(struct drm_device *dev, int crtc)
252{
253 struct shmob_drm_device *sdev = dev->dev_private;
254
255 shmob_drm_crtc_enable_vblank(sdev, false);
256}
257
258static const struct file_operations shmob_drm_fops = {
259 .owner = THIS_MODULE,
260 .open = drm_open,
261 .release = drm_release,
262 .unlocked_ioctl = drm_ioctl,
263#ifdef CONFIG_COMPAT
264 .compat_ioctl = drm_compat_ioctl,
265#endif
266 .poll = drm_poll,
267 .read = drm_read,
268 .fasync = drm_fasync,
269 .llseek = no_llseek,
270 .mmap = drm_gem_cma_mmap,
271};
272
273static struct drm_driver shmob_drm_driver = {
274 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
275 .load = shmob_drm_load,
276 .unload = shmob_drm_unload,
277 .preclose = shmob_drm_preclose,
278 .irq_handler = shmob_drm_irq,
279 .get_vblank_counter = drm_vblank_count,
280 .enable_vblank = shmob_drm_enable_vblank,
281 .disable_vblank = shmob_drm_disable_vblank,
282 .gem_free_object = drm_gem_cma_free_object,
283 .gem_vm_ops = &drm_gem_cma_vm_ops,
284 .dumb_create = drm_gem_cma_dumb_create,
285 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
286 .dumb_destroy = drm_gem_cma_dumb_destroy,
287 .fops = &shmob_drm_fops,
288 .name = "shmob-drm",
289 .desc = "Renesas SH Mobile DRM",
290 .date = "20120424",
291 .major = 1,
292 .minor = 0,
293};
294
295/* -----------------------------------------------------------------------------
296 * Power management
297 */
298
299#ifdef CONFIG_PM_SLEEP
300static int shmob_drm_pm_suspend(struct device *dev)
301{
302 struct platform_device *pdev = to_platform_device(dev);
303 struct drm_device *ddev = platform_get_drvdata(pdev);
304 struct shmob_drm_device *sdev = ddev->dev_private;
305
306 drm_kms_helper_poll_disable(ddev);
307 shmob_drm_crtc_suspend(&sdev->crtc);
308
309 return 0;
310}
311
312static int shmob_drm_pm_resume(struct device *dev)
313{
314 struct platform_device *pdev = to_platform_device(dev);
315 struct drm_device *ddev = platform_get_drvdata(pdev);
316 struct shmob_drm_device *sdev = ddev->dev_private;
317
318	mutex_lock(&ddev->mode_config.mutex);
319	shmob_drm_crtc_resume(&sdev->crtc);
320	mutex_unlock(&ddev->mode_config.mutex);
321
322	drm_kms_helper_poll_enable(ddev);
323 return 0;
324}
325#endif
326
327static const struct dev_pm_ops shmob_drm_pm_ops = {
328 SET_SYSTEM_SLEEP_PM_OPS(shmob_drm_pm_suspend, shmob_drm_pm_resume)
329};
330
331/* -----------------------------------------------------------------------------
332 * Platform driver
333 */
334
335static int __devinit shmob_drm_probe(struct platform_device *pdev)
336{
337 return drm_platform_init(&shmob_drm_driver, pdev);
338}
339
340static int __devexit shmob_drm_remove(struct platform_device *pdev)
341{
342 drm_platform_exit(&shmob_drm_driver, pdev);
343
344 return 0;
345}
346
347static struct platform_driver shmob_drm_platform_driver = {
348 .probe = shmob_drm_probe,
349 .remove = __devexit_p(shmob_drm_remove),
350 .driver = {
351 .owner = THIS_MODULE,
352 .name = "shmob-drm",
353 .pm = &shmob_drm_pm_ops,
354 },
355};
356
357module_platform_driver(shmob_drm_platform_driver);
358
359MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
360MODULE_DESCRIPTION("Renesas SH Mobile DRM Driver");
361MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.h b/drivers/gpu/drm/shmobile/shmob_drm_drv.h
new file mode 100644
index 000000000000..4d46b811b5a7
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.h
@@ -0,0 +1,47 @@
1/*
2 * shmob_drm_drv.h -- SH Mobile DRM driver
3 *
4 * Copyright (C) 2012 Renesas Corporation
5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __SHMOB_DRM_DRV_H__
15#define __SHMOB_DRM_DRV_H__
16
17#include <linux/kernel.h>
18#include <linux/platform_data/shmob_drm.h>
19#include <linux/spinlock.h>
20
21#include "shmob_drm_crtc.h"
22
23struct clk;
24struct device;
25struct drm_device;
26struct sh_mobile_meram_info;
27
28struct shmob_drm_device {
29 struct device *dev;
30 const struct shmob_drm_platform_data *pdata;
31
32 void __iomem *mmio;
33 struct clk *clock;
34 struct sh_mobile_meram_info *meram;
35 u32 lddckr;
36 u32 ldmt1r;
37
38 spinlock_t irq_lock; /* Protects hardware LDINTR register */
39
40 struct drm_device *ddev;
41
42 struct shmob_drm_crtc crtc;
43 struct shmob_drm_encoder encoder;
44 struct shmob_drm_connector connector;
45};
46
47#endif /* __SHMOB_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
new file mode 100644
index 000000000000..c291ee385b4f
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -0,0 +1,160 @@
1/*
2 * shmob_drm_kms.c -- SH Mobile DRM Mode Setting
3 *
4 * Copyright (C) 2012 Renesas Corporation
5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <drm/drmP.h>
15#include <drm/drm_crtc.h>
16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_fb_cma_helper.h>
18#include <drm/drm_gem_cma_helper.h>
19
20#include <video/sh_mobile_meram.h>
21
22#include "shmob_drm_crtc.h"
23#include "shmob_drm_drv.h"
24#include "shmob_drm_kms.h"
25#include "shmob_drm_regs.h"
26
27/* -----------------------------------------------------------------------------
28 * Format helpers
29 */
30
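/* Note that for the YUV formats below bpp is the average number of bits per
 * pixel across the luma and chroma planes (12 for 4:2:0, 16 for 4:2:2 and
 * 24 for 4:4:4), not the depth of a single plane.
 */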
31static const struct shmob_drm_format_info shmob_drm_format_infos[] = {
32 {
33 .fourcc = DRM_FORMAT_RGB565,
34 .bpp = 16,
35 .yuv = false,
36 .lddfr = LDDFR_PKF_RGB16,
37 .meram = SH_MOBILE_MERAM_PF_RGB,
38 }, {
39 .fourcc = DRM_FORMAT_RGB888,
40 .bpp = 24,
41 .yuv = false,
42 .lddfr = LDDFR_PKF_RGB24,
43 .meram = SH_MOBILE_MERAM_PF_RGB,
44 }, {
45 .fourcc = DRM_FORMAT_ARGB8888,
46 .bpp = 32,
47 .yuv = false,
48 .lddfr = LDDFR_PKF_ARGB32,
49 .meram = SH_MOBILE_MERAM_PF_RGB,
50 }, {
51 .fourcc = DRM_FORMAT_NV12,
52 .bpp = 12,
53 .yuv = true,
54 .lddfr = LDDFR_CC | LDDFR_YF_420,
55 .meram = SH_MOBILE_MERAM_PF_NV,
56 }, {
57 .fourcc = DRM_FORMAT_NV21,
58 .bpp = 12,
59 .yuv = true,
60 .lddfr = LDDFR_CC | LDDFR_YF_420,
61 .meram = SH_MOBILE_MERAM_PF_NV,
62 }, {
63 .fourcc = DRM_FORMAT_NV16,
64 .bpp = 16,
65 .yuv = true,
66 .lddfr = LDDFR_CC | LDDFR_YF_422,
67 .meram = SH_MOBILE_MERAM_PF_NV,
68 }, {
69 .fourcc = DRM_FORMAT_NV61,
70 .bpp = 16,
71 .yuv = true,
72 .lddfr = LDDFR_CC | LDDFR_YF_422,
73 .meram = SH_MOBILE_MERAM_PF_NV,
74 }, {
75 .fourcc = DRM_FORMAT_NV24,
76 .bpp = 24,
77 .yuv = true,
78 .lddfr = LDDFR_CC | LDDFR_YF_444,
79 .meram = SH_MOBILE_MERAM_PF_NV24,
80 }, {
81 .fourcc = DRM_FORMAT_NV42,
82 .bpp = 24,
83 .yuv = true,
84 .lddfr = LDDFR_CC | LDDFR_YF_444,
85 .meram = SH_MOBILE_MERAM_PF_NV24,
86 },
87};
88
89const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc)
90{
91 unsigned int i;
92
93 for (i = 0; i < ARRAY_SIZE(shmob_drm_format_infos); ++i) {
94 if (shmob_drm_format_infos[i].fourcc == fourcc)
95 return &shmob_drm_format_infos[i];
96 }
97
98 return NULL;
99}
100
101/* -----------------------------------------------------------------------------
102 * Frame buffer
103 */
104
105static struct drm_framebuffer *
106shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
107 struct drm_mode_fb_cmd2 *mode_cmd)
108{
109 const struct shmob_drm_format_info *format;
110
111 format = shmob_drm_format_info(mode_cmd->pixel_format);
112 if (format == NULL) {
113 dev_dbg(dev->dev, "unsupported pixel format %08x\n",
114 mode_cmd->pixel_format);
115 return ERR_PTR(-EINVAL);
116 }
117
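	/* The LCDC requires the line stride to be a multiple of 8 bytes, and
	 * the memory width register fields that store it are 16 bits wide.
	 */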
118 if (mode_cmd->pitches[0] & 7 || mode_cmd->pitches[0] >= 65536) {
119		dev_dbg(dev->dev, "invalid pitch value %u\n",
120 mode_cmd->pitches[0]);
121 return ERR_PTR(-EINVAL);
122 }
123
124 if (format->yuv) {
125 unsigned int chroma_cpp = format->bpp == 24 ? 2 : 1;
126
127 if (mode_cmd->pitches[1] != mode_cmd->pitches[0] * chroma_cpp) {
128 dev_dbg(dev->dev,
129 "luma and chroma pitches do not match\n");
130 return ERR_PTR(-EINVAL);
131 }
132 }
133
134 return drm_fb_cma_create(dev, file_priv, mode_cmd);
135}
136
137static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
138 .fb_create = shmob_drm_fb_create,
139};
140
141int shmob_drm_modeset_init(struct shmob_drm_device *sdev)
142{
143 drm_mode_config_init(sdev->ddev);
144
145 shmob_drm_crtc_create(sdev);
146 shmob_drm_encoder_create(sdev);
147 shmob_drm_connector_create(sdev, &sdev->encoder.encoder);
148
149 drm_kms_helper_poll_init(sdev->ddev);
150
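	/* The LCDC position and size register fields are 12 bits wide,
	 * capping the mode size at 4095x4095.
	 */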
151 sdev->ddev->mode_config.min_width = 0;
152 sdev->ddev->mode_config.min_height = 0;
153 sdev->ddev->mode_config.max_width = 4095;
154 sdev->ddev->mode_config.max_height = 4095;
155 sdev->ddev->mode_config.funcs = &shmob_drm_mode_config_funcs;
156
157 drm_helper_disable_unused_functions(sdev->ddev);
158
159 return 0;
160}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.h b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
new file mode 100644
index 000000000000..9495c9111308
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.h
@@ -0,0 +1,34 @@
1/*
2 * shmob_drm_kms.h -- SH Mobile DRM Mode Setting
3 *
4 * Copyright (C) 2012 Renesas Corporation
5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __SHMOB_DRM_KMS_H__
15#define __SHMOB_DRM_KMS_H__
16
17#include <linux/types.h>
18
19struct drm_gem_cma_object;
20struct shmob_drm_device;
21
22struct shmob_drm_format_info {
23 u32 fourcc;
24 unsigned int bpp;
25 bool yuv;
26 u32 lddfr;
27 unsigned int meram;
28};
29
30const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc);
31
32int shmob_drm_modeset_init(struct shmob_drm_device *sdev);
33
34#endif /* __SHMOB_DRM_KMS_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
new file mode 100644
index 000000000000..e1eb899b0288
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -0,0 +1,268 @@
1/*
2 * shmob_drm_plane.c -- SH Mobile DRM Planes
3 *
4 * Copyright (C) 2012 Renesas Corporation
5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <drm/drmP.h>
15#include <drm/drm_crtc.h>
16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_fb_cma_helper.h>
18#include <drm/drm_gem_cma_helper.h>
19
20#include <video/sh_mobile_meram.h>
21
22#include "shmob_drm_drv.h"
23#include "shmob_drm_kms.h"
24#include "shmob_drm_plane.h"
25#include "shmob_drm_regs.h"
26
27struct shmob_drm_plane {
28 struct drm_plane plane;
29 unsigned int index;
30 unsigned int alpha;
31
32 const struct shmob_drm_format_info *format;
33 unsigned long dma[2];
34
35 unsigned int src_x;
36 unsigned int src_y;
37 unsigned int crtc_x;
38 unsigned int crtc_y;
39 unsigned int crtc_w;
40 unsigned int crtc_h;
41};
42
43#define to_shmob_plane(p) container_of(p, struct shmob_drm_plane, plane)
44
45static void shmob_drm_plane_compute_base(struct shmob_drm_plane *splane,
46 struct drm_framebuffer *fb,
47 int x, int y)
48{
49 struct drm_gem_cma_object *gem;
50 unsigned int bpp;
51
52 bpp = splane->format->yuv ? 8 : splane->format->bpp;
53 gem = drm_fb_cma_get_gem_obj(fb, 0);
54 splane->dma[0] = gem->paddr + fb->offsets[0]
55 + y * fb->pitches[0] + x * bpp / 8;
56
57 if (splane->format->yuv) {
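		/* bpp - 8 is the bits per pixel of the chroma plane: 4 for
		 * 4:2:0, 8 for 4:2:2 and 16 for 4:4:4. This selects the
		 * vertical subsampling factor and the horizontal byte stride
		 * used below.
		 */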
58 bpp = splane->format->bpp - 8;
59 gem = drm_fb_cma_get_gem_obj(fb, 1);
60 splane->dma[1] = gem->paddr + fb->offsets[1]
61 + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
62 + x * (bpp == 16 ? 2 : 1);
63 }
64}
65
66static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
67 struct drm_framebuffer *fb)
68{
69 struct shmob_drm_device *sdev = splane->plane.dev->dev_private;
70 u32 format;
71
72 /* TODO: Support ROP3 mode */
73 format = LDBBSIFR_EN | (splane->alpha << LDBBSIFR_LAY_SHIFT);
74
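	/* Select data swapping. The LDBBSIFR_SWP{L,W,B} bits swap longwords,
	 * words and bytes respectively to match the format's memory layout.
	 */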
75 switch (splane->format->fourcc) {
76 case DRM_FORMAT_RGB565:
77 case DRM_FORMAT_NV21:
78 case DRM_FORMAT_NV61:
79 case DRM_FORMAT_NV42:
80 format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW;
81 break;
82 case DRM_FORMAT_RGB888:
83 case DRM_FORMAT_NV12:
84 case DRM_FORMAT_NV16:
85 case DRM_FORMAT_NV24:
86 format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB;
87 break;
88 case DRM_FORMAT_ARGB8888:
89 default:
90 format |= LDBBSIFR_SWPL;
91 break;
92 }
93
94 switch (splane->format->fourcc) {
95 case DRM_FORMAT_RGB565:
96 format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16;
97 break;
98 case DRM_FORMAT_RGB888:
99 format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24;
100 break;
101 case DRM_FORMAT_ARGB8888:
102		format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDBBSIFR_RPKF_ARGB32;
103 break;
104 case DRM_FORMAT_NV12:
105 case DRM_FORMAT_NV21:
106 format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420;
107 break;
108 case DRM_FORMAT_NV16:
109 case DRM_FORMAT_NV61:
110 format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422;
111 break;
112 case DRM_FORMAT_NV24:
113 case DRM_FORMAT_NV42:
114 format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444;
115 break;
116 }
117
118#define plane_reg_dump(sdev, splane, reg) \
119 dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \
120 splane->index, #reg, \
121 lcdc_read(sdev, reg(splane->index)), \
122 lcdc_read(sdev, reg(splane->index) + LCDC_SIDE_B_OFFSET))
123
124 plane_reg_dump(sdev, splane, LDBnBSIFR);
125 plane_reg_dump(sdev, splane, LDBnBSSZR);
126 plane_reg_dump(sdev, splane, LDBnBLOCR);
127 plane_reg_dump(sdev, splane, LDBnBSMWR);
128 plane_reg_dump(sdev, splane, LDBnBSAYR);
129 plane_reg_dump(sdev, splane, LDBnBSACR);
130
131 lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index));
132 dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
133 "LDBCR", lcdc_read(sdev, LDBCR));
134
135 lcdc_write(sdev, LDBnBSIFR(splane->index), format);
136
137 lcdc_write(sdev, LDBnBSSZR(splane->index),
138 (splane->crtc_h << LDBBSSZR_BVSS_SHIFT) |
139 (splane->crtc_w << LDBBSSZR_BHSS_SHIFT));
140 lcdc_write(sdev, LDBnBLOCR(splane->index),
141 (splane->crtc_y << LDBBLOCR_CVLC_SHIFT) |
142 (splane->crtc_x << LDBBLOCR_CHLC_SHIFT));
143 lcdc_write(sdev, LDBnBSMWR(splane->index),
144 fb->pitches[0] << LDBBSMWR_BSMW_SHIFT);
145
146 shmob_drm_plane_compute_base(splane, fb, splane->src_x, splane->src_y);
147
148 lcdc_write(sdev, LDBnBSAYR(splane->index), splane->dma[0]);
149 if (splane->format->yuv)
150 lcdc_write(sdev, LDBnBSACR(splane->index), splane->dma[1]);
151
152 lcdc_write(sdev, LDBCR,
153 LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index));
154 dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
155 "LDBCR", lcdc_read(sdev, LDBCR));
156
157 plane_reg_dump(sdev, splane, LDBnBSIFR);
158 plane_reg_dump(sdev, splane, LDBnBSSZR);
159 plane_reg_dump(sdev, splane, LDBnBLOCR);
160 plane_reg_dump(sdev, splane, LDBnBSMWR);
161 plane_reg_dump(sdev, splane, LDBnBSAYR);
162 plane_reg_dump(sdev, splane, LDBnBSACR);
163}
164
165void shmob_drm_plane_setup(struct drm_plane *plane)
166{
167 struct shmob_drm_plane *splane = to_shmob_plane(plane);
168
169 if (plane->fb == NULL || !plane->enabled)
170 return;
171
172 __shmob_drm_plane_setup(splane, plane->fb);
173}
174
175static int
176shmob_drm_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
177 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
178 unsigned int crtc_w, unsigned int crtc_h,
179 uint32_t src_x, uint32_t src_y,
180 uint32_t src_w, uint32_t src_h)
181{
182 struct shmob_drm_plane *splane = to_shmob_plane(plane);
183 struct shmob_drm_device *sdev = plane->dev->dev_private;
184 const struct shmob_drm_format_info *format;
185
186 format = shmob_drm_format_info(fb->pixel_format);
187 if (format == NULL) {
188 dev_dbg(sdev->dev, "update_plane: unsupported format %08x\n",
189 fb->pixel_format);
190 return -EINVAL;
191 }
192
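	/* The source coordinates are 16.16 fixed point. The blend planes
	 * can't scale, so source and destination sizes must match exactly.
	 */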
193 if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
194 dev_dbg(sdev->dev, "%s: scaling not supported\n", __func__);
195 return -EINVAL;
196 }
197
198 splane->format = format;
199
200 splane->src_x = src_x >> 16;
201 splane->src_y = src_y >> 16;
202 splane->crtc_x = crtc_x;
203 splane->crtc_y = crtc_y;
204 splane->crtc_w = crtc_w;
205 splane->crtc_h = crtc_h;
206
207 __shmob_drm_plane_setup(splane, fb);
208 return 0;
209}
210
211static int shmob_drm_plane_disable(struct drm_plane *plane)
212{
213 struct shmob_drm_plane *splane = to_shmob_plane(plane);
214 struct shmob_drm_device *sdev = plane->dev->dev_private;
215
216 splane->format = NULL;
217
218 lcdc_write(sdev, LDBnBSIFR(splane->index), 0);
219 return 0;
220}
221
222static void shmob_drm_plane_destroy(struct drm_plane *plane)
223{
224 struct shmob_drm_plane *splane = to_shmob_plane(plane);
225
226 shmob_drm_plane_disable(plane);
227 drm_plane_cleanup(plane);
228 kfree(splane);
229}
230
231static const struct drm_plane_funcs shmob_drm_plane_funcs = {
232 .update_plane = shmob_drm_plane_update,
233 .disable_plane = shmob_drm_plane_disable,
234 .destroy = shmob_drm_plane_destroy,
235};
236
237static const uint32_t formats[] = {
238 DRM_FORMAT_RGB565,
239 DRM_FORMAT_RGB888,
240 DRM_FORMAT_ARGB8888,
241 DRM_FORMAT_NV12,
242 DRM_FORMAT_NV21,
243 DRM_FORMAT_NV16,
244 DRM_FORMAT_NV61,
245 DRM_FORMAT_NV24,
246 DRM_FORMAT_NV42,
247};
248
249int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index)
250{
251 struct shmob_drm_plane *splane;
252 int ret;
253
254 splane = kzalloc(sizeof(*splane), GFP_KERNEL);
255 if (splane == NULL)
256 return -ENOMEM;
257
258 splane->index = index;
259 splane->alpha = 255;
260
261 ret = drm_plane_init(sdev->ddev, &splane->plane, 1,
262 &shmob_drm_plane_funcs, formats,
263 ARRAY_SIZE(formats), false);
264 if (ret < 0)
265 kfree(splane);
266
267 return ret;
268}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.h b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
new file mode 100644
index 000000000000..99623d05e3b0
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
@@ -0,0 +1,22 @@
1/*
2 * shmob_drm_plane.h -- SH Mobile DRM Planes
3 *
4 * Copyright (C) 2012 Renesas Corporation
5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __SHMOB_DRM_PLANE_H__
15#define __SHMOB_DRM_PLANE_H__
16
17struct shmob_drm_device;
18
19int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index);
20void shmob_drm_plane_setup(struct drm_plane *plane);
21
22#endif /* __SHMOB_DRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_regs.h b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
new file mode 100644
index 000000000000..7923cdd6368e
--- /dev/null
+++ b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
@@ -0,0 +1,311 @@
1/*
2 * shmob_drm_regs.h -- SH Mobile DRM registers
3 *
4 * Copyright (C) 2012 Renesas Corporation
5 *
6 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __SHMOB_DRM_REGS_H__
15#define __SHMOB_DRM_REGS_H__
16
17#include <linux/io.h>
18
19/* Register definitions */
20#define LDDCKPAT1R 0x400
21#define LDDCKPAT2R 0x404
22#define LDDCKR 0x410
23#define LDDCKR_ICKSEL_BUS (0 << 16)
24#define LDDCKR_ICKSEL_MIPI (1 << 16)
25#define LDDCKR_ICKSEL_HDMI (2 << 16)
26#define LDDCKR_ICKSEL_EXT (3 << 16)
27#define LDDCKR_ICKSEL_MASK (7 << 16)
28#define LDDCKR_MOSEL (1 << 6)
29#define LDDCKSTPR 0x414
30#define LDDCKSTPR_DCKSTS (1 << 16)
31#define LDDCKSTPR_DCKSTP (1 << 0)
32#define LDMT1R 0x418
33#define LDMT1R_VPOL (1 << 28)
34#define LDMT1R_HPOL (1 << 27)
35#define LDMT1R_DWPOL (1 << 26)
36#define LDMT1R_DIPOL (1 << 25)
37#define LDMT1R_DAPOL (1 << 24)
38#define LDMT1R_HSCNT (1 << 17)
39#define LDMT1R_DWCNT (1 << 16)
40#define LDMT1R_IFM (1 << 12)
41#define LDMT1R_MIFTYP_RGB8 (0x0 << 0)
42#define LDMT1R_MIFTYP_RGB9 (0x4 << 0)
43#define LDMT1R_MIFTYP_RGB12A (0x5 << 0)
44#define LDMT1R_MIFTYP_RGB12B (0x6 << 0)
45#define LDMT1R_MIFTYP_RGB16 (0x7 << 0)
46#define LDMT1R_MIFTYP_RGB18 (0xa << 0)
47#define LDMT1R_MIFTYP_RGB24 (0xb << 0)
48#define LDMT1R_MIFTYP_YCBCR (0xf << 0)
49#define LDMT1R_MIFTYP_SYS8A (0x0 << 0)
50#define LDMT1R_MIFTYP_SYS8B (0x1 << 0)
51#define LDMT1R_MIFTYP_SYS8C (0x2 << 0)
52#define LDMT1R_MIFTYP_SYS8D (0x3 << 0)
53#define LDMT1R_MIFTYP_SYS9 (0x4 << 0)
54#define LDMT1R_MIFTYP_SYS12 (0x5 << 0)
55#define LDMT1R_MIFTYP_SYS16A (0x7 << 0)
56#define LDMT1R_MIFTYP_SYS16B (0x8 << 0)
57#define LDMT1R_MIFTYP_SYS16C (0x9 << 0)
58#define LDMT1R_MIFTYP_SYS18 (0xa << 0)
59#define LDMT1R_MIFTYP_SYS24 (0xb << 0)
60#define LDMT1R_MIFTYP_MASK (0xf << 0)
61#define LDMT2R 0x41c
62#define LDMT2R_CSUP_MASK (7 << 26)
63#define LDMT2R_CSUP_SHIFT 26
64#define LDMT2R_RSV (1 << 25)
65#define LDMT2R_VSEL (1 << 24)
66#define LDMT2R_WCSC_MASK (0xff << 16)
67#define LDMT2R_WCSC_SHIFT 16
68#define LDMT2R_WCEC_MASK (0xff << 8)
69#define LDMT2R_WCEC_SHIFT 8
70#define LDMT2R_WCLW_MASK (0xff << 0)
71#define LDMT2R_WCLW_SHIFT 0
72#define LDMT3R 0x420
73#define LDMT3R_RDLC_MASK (0x3f << 24)
74#define LDMT3R_RDLC_SHIFT 24
75#define LDMT3R_RCSC_MASK (0xff << 16)
76#define LDMT3R_RCSC_SHIFT 16
77#define LDMT3R_RCEC_MASK (0xff << 8)
78#define LDMT3R_RCEC_SHIFT 8
79#define LDMT3R_RCLW_MASK (0xff << 0)
80#define LDMT3R_RCLW_SHIFT 0
81#define LDDFR 0x424
82#define LDDFR_CF1 (1 << 18)
83#define LDDFR_CF0 (1 << 17)
84#define LDDFR_CC (1 << 16)
85#define LDDFR_YF_420 (0 << 8)
86#define LDDFR_YF_422 (1 << 8)
87#define LDDFR_YF_444 (2 << 8)
88#define LDDFR_YF_MASK (3 << 8)
89#define LDDFR_PKF_ARGB32 (0x00 << 0)
90#define LDDFR_PKF_RGB16 (0x03 << 0)
91#define LDDFR_PKF_RGB24 (0x0b << 0)
92#define LDDFR_PKF_MASK (0x1f << 0)
93#define LDSM1R 0x428
94#define LDSM1R_OS (1 << 0)
95#define LDSM2R 0x42c
96#define LDSM2R_OSTRG (1 << 0)
97#define LDSA1R 0x430
98#define LDSA2R 0x434
99#define LDMLSR 0x438
100#define LDWBFR 0x43c
101#define LDWBCNTR 0x440
102#define LDWBAR 0x444
103#define LDHCNR 0x448
104#define LDHSYNR 0x44c
105#define LDVLNR 0x450
106#define LDVSYNR 0x454
107#define LDHPDR 0x458
108#define LDVPDR 0x45c
109#define LDPMR 0x460
110#define LDPMR_LPS (3 << 0)
111#define LDINTR 0x468
112#define LDINTR_FE (1 << 10)
113#define LDINTR_VSE (1 << 9)
114#define LDINTR_VEE (1 << 8)
115#define LDINTR_FS (1 << 2)
116#define LDINTR_VSS (1 << 1)
117#define LDINTR_VES (1 << 0)
118#define LDINTR_STATUS_MASK (0xff << 0)
119#define LDSR 0x46c
120#define LDSR_MSS (1 << 10)
121#define LDSR_MRS (1 << 8)
122#define LDSR_AS (1 << 1)
123#define LDCNT1R 0x470
124#define LDCNT1R_DE (1 << 0)
125#define LDCNT2R 0x474
126#define LDCNT2R_BR (1 << 8)
127#define LDCNT2R_MD (1 << 3)
128#define LDCNT2R_SE (1 << 2)
129#define LDCNT2R_ME (1 << 1)
130#define LDCNT2R_DO (1 << 0)
131#define LDRCNTR 0x478
132#define LDRCNTR_SRS (1 << 17)
133#define LDRCNTR_SRC (1 << 16)
134#define LDRCNTR_MRS (1 << 1)
135#define LDRCNTR_MRC (1 << 0)
136#define LDDDSR 0x47c
137#define LDDDSR_LS (1 << 2)
138#define LDDDSR_WS (1 << 1)
139#define LDDDSR_BS (1 << 0)
140#define LDHAJR 0x4a0
141
142#define LDDWD0R 0x800
143#define LDDWDxR_WDACT (1 << 28)
144#define LDDWDxR_RSW (1 << 24)
145#define LDDRDR 0x840
146#define LDDRDR_RSR (1 << 24)
147#define LDDRDR_DRD_MASK (0x3ffff << 0)
148#define LDDWAR 0x900
149#define LDDWAR_WA (1 << 0)
150#define LDDRAR 0x904
151#define LDDRAR_RA (1 << 0)
152
153#define LDBCR 0xb00
154#define LDBCR_UPC(n) (1 << ((n) + 16))
155#define LDBCR_UPF(n) (1 << ((n) + 8))
156#define LDBCR_UPD(n) (1 << ((n) + 0))
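/* Plane registers are double-buffered and latched under LDBCR control: set
 * UPC(n) before reprogramming plane n, then write UPF(n) | UPD(n) to commit
 * the new settings (see __shmob_drm_plane_setup() in shmob_drm_plane.c).
 */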
157#define LDBnBSIFR(n) (0xb20 + (n) * 0x20 + 0x00)
158#define LDBBSIFR_EN (1 << 31)
159#define LDBBSIFR_VS (1 << 29)
160#define LDBBSIFR_BRSEL (1 << 28)
161#define LDBBSIFR_MX (1 << 27)
162#define LDBBSIFR_MY (1 << 26)
163#define LDBBSIFR_CV3 (3 << 24)
164#define LDBBSIFR_CV2 (2 << 24)
165#define LDBBSIFR_CV1 (1 << 24)
166#define LDBBSIFR_CV0 (0 << 24)
167#define LDBBSIFR_CV_MASK (3 << 24)
168#define LDBBSIFR_LAY_MASK (0xff << 16)
169#define LDBBSIFR_LAY_SHIFT 16
170#define LDBBSIFR_ROP3_MASK (0xff << 16)
171#define LDBBSIFR_ROP3_SHIFT 16
172#define LDBBSIFR_AL_PL8 (3 << 14)
173#define LDBBSIFR_AL_PL1 (2 << 14)
174#define LDBBSIFR_AL_PK (1 << 14)
175#define LDBBSIFR_AL_1 (0 << 14)
176#define LDBBSIFR_AL_MASK (3 << 14)
177#define LDBBSIFR_SWPL (1 << 10)
178#define LDBBSIFR_SWPW (1 << 9)
179#define LDBBSIFR_SWPB (1 << 8)
180#define LDBBSIFR_RY (1 << 7)
181#define LDBBSIFR_CHRR_420 (2 << 0)
182#define LDBBSIFR_CHRR_422 (1 << 0)
183#define LDBBSIFR_CHRR_444 (0 << 0)
184#define LDBBSIFR_RPKF_ARGB32 (0x00 << 0)
185#define LDBBSIFR_RPKF_RGB16 (0x03 << 0)
186#define LDBBSIFR_RPKF_RGB24 (0x0b << 0)
187#define LDBBSIFR_RPKF_MASK (0x1f << 0)
188#define LDBnBSSZR(n) (0xb20 + (n) * 0x20 + 0x04)
189#define LDBBSSZR_BVSS_MASK (0xfff << 16)
190#define LDBBSSZR_BVSS_SHIFT 16
191#define LDBBSSZR_BHSS_MASK (0xfff << 0)
192#define LDBBSSZR_BHSS_SHIFT 0
193#define LDBnBLOCR(n) (0xb20 + (n) * 0x20 + 0x08)
194#define LDBBLOCR_CVLC_MASK (0xfff << 16)
195#define LDBBLOCR_CVLC_SHIFT 16
196#define LDBBLOCR_CHLC_MASK (0xfff << 0)
197#define LDBBLOCR_CHLC_SHIFT 0
198#define LDBnBSMWR(n) (0xb20 + (n) * 0x20 + 0x0c)
199#define LDBBSMWR_BSMWA_MASK (0xffff << 16)
200#define LDBBSMWR_BSMWA_SHIFT 16
201#define LDBBSMWR_BSMW_MASK (0xffff << 0)
202#define LDBBSMWR_BSMW_SHIFT 0
203#define LDBnBSAYR(n) (0xb20 + (n) * 0x20 + 0x10)
204#define LDBBSAYR_FG1A_MASK (0xff << 24)
205#define LDBBSAYR_FG1A_SHIFT 24
206#define LDBBSAYR_FG1R_MASK (0xff << 16)
207#define LDBBSAYR_FG1R_SHIFT 16
208#define LDBBSAYR_FG1G_MASK (0xff << 8)
209#define LDBBSAYR_FG1G_SHIFT 8
210#define LDBBSAYR_FG1B_MASK (0xff << 0)
211#define LDBBSAYR_FG1B_SHIFT 0
212#define LDBnBSACR(n) (0xb20 + (n) * 0x20 + 0x14)
213#define LDBBSACR_FG2A_MASK (0xff << 24)
214#define LDBBSACR_FG2A_SHIFT 24
215#define LDBBSACR_FG2R_MASK (0xff << 16)
216#define LDBBSACR_FG2R_SHIFT 16
217#define LDBBSACR_FG2G_MASK (0xff << 8)
218#define LDBBSACR_FG2G_SHIFT 8
219#define LDBBSACR_FG2B_MASK (0xff << 0)
220#define LDBBSACR_FG2B_SHIFT 0
221#define LDBnBSAAR(n) (0xb20 + (n) * 0x20 + 0x18)
222#define LDBBSAAR_AP_MASK (0xff << 24)
223#define LDBBSAAR_AP_SHIFT 24
224#define LDBBSAAR_R_MASK (0xff << 16)
225#define LDBBSAAR_R_SHIFT 16
226#define LDBBSAAR_GY_MASK (0xff << 8)
227#define LDBBSAAR_GY_SHIFT 8
228#define LDBBSAAR_B_MASK (0xff << 0)
229#define LDBBSAAR_B_SHIFT 0
230#define LDBnBPPCR(n) (0xb20 + (n) * 0x20 + 0x1c)
231#define LDBBPPCR_AP_MASK (0xff << 24)
232#define LDBBPPCR_AP_SHIFT 24
233#define LDBBPPCR_R_MASK (0xff << 16)
234#define LDBBPPCR_R_SHIFT 16
235#define LDBBPPCR_GY_MASK (0xff << 8)
236#define LDBBPPCR_GY_SHIFT 8
237#define LDBBPPCR_B_MASK (0xff << 0)
238#define LDBBPPCR_B_SHIFT 0
239#define LDBnBBGCL(n) (0xb10 + (n) * 0x04)
240#define LDBBBGCL_BGA_MASK (0xff << 24)
241#define LDBBBGCL_BGA_SHIFT 24
242#define LDBBBGCL_BGR_MASK (0xff << 16)
243#define LDBBBGCL_BGR_SHIFT 16
244#define LDBBBGCL_BGG_MASK (0xff << 8)
245#define LDBBBGCL_BGG_SHIFT 8
246#define LDBBBGCL_BGB_MASK (0xff << 0)
247#define LDBBBGCL_BGB_SHIFT 0
248
249#define LCDC_SIDE_B_OFFSET 0x1000
250#define LCDC_MIRROR_OFFSET 0x2000
251
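/* Many mode setting registers are banked, with a Side A and a Side B
 * instance 0x1000 apart and a mirror copy at offset 0x2000. lcdc_write()
 * keeps both sides in sync for banked registers, while lcdc_write_mirror()
 * targets the mirror copy only.
 */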
252static inline bool lcdc_is_banked(u32 reg)
253{
254 switch (reg) {
255 case LDMT1R:
256 case LDMT2R:
257 case LDMT3R:
258 case LDDFR:
259 case LDSM1R:
260 case LDSA1R:
261 case LDSA2R:
262 case LDMLSR:
263 case LDWBFR:
264 case LDWBCNTR:
265 case LDWBAR:
266 case LDHCNR:
267 case LDHSYNR:
268 case LDVLNR:
269 case LDVSYNR:
270 case LDHPDR:
271 case LDVPDR:
272 case LDHAJR:
273 return true;
274 default:
275 return reg >= LDBnBBGCL(0) && reg <= LDBnBPPCR(3);
276 }
277}
278
279static inline void lcdc_write_mirror(struct shmob_drm_device *sdev, u32 reg,
280 u32 data)
281{
282 iowrite32(data, sdev->mmio + reg + LCDC_MIRROR_OFFSET);
283}
284
285static inline void lcdc_write(struct shmob_drm_device *sdev, u32 reg, u32 data)
286{
287 iowrite32(data, sdev->mmio + reg);
288 if (lcdc_is_banked(reg))
289 iowrite32(data, sdev->mmio + reg + LCDC_SIDE_B_OFFSET);
290}
291
292static inline u32 lcdc_read(struct shmob_drm_device *sdev, u32 reg)
293{
294 return ioread32(sdev->mmio + reg);
295}
296
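/* Busy-wait until (reg & mask) == until, giving up after 5 ms. Returns 0 on
 * success or -ETIMEDOUT on timeout.
 */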
297static inline int lcdc_wait_bit(struct shmob_drm_device *sdev, u32 reg,
298 u32 mask, u32 until)
299{
300 unsigned long timeout = jiffies + msecs_to_jiffies(5);
301
302 while ((lcdc_read(sdev, reg) & mask) != until) {
303 if (time_after(jiffies, timeout))
304 return -ETIMEDOUT;
305 cpu_relax();
306 }
307
308 return 0;
309}
310
311#endif /* __SHMOB_DRM_REGS_H__ */
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2a4aa57779e7..2026060f03e0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -472,7 +472,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
472 else 472 else
473 tmp = pgprot_noncached(tmp); 473 tmp = pgprot_noncached(tmp);
474#endif 474#endif
475#if defined(__sparc__) 475#if defined(__sparc__) || defined(__mips__)
476 if (!(caching_flags & TTM_PL_FLAG_CACHED)) 476 if (!(caching_flags & TTM_PL_FLAG_CACHED))
477 tmp = pgprot_noncached(tmp); 477 tmp = pgprot_noncached(tmp);
478#endif 478#endif
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index d4aa5a82ab1b..b8b394319b45 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -1060,7 +1060,7 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
1060 1060
1061 _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); 1061 _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
1062 if (!_manager) 1062 if (!_manager)
1063 goto err_manager; 1063 goto err;
1064 1064
1065 mutex_init(&_manager->lock); 1065 mutex_init(&_manager->lock);
1066 INIT_LIST_HEAD(&_manager->pools); 1066 INIT_LIST_HEAD(&_manager->pools);
@@ -1078,9 +1078,6 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
1078 } 1078 }
1079 ttm_dma_pool_mm_shrink_init(_manager); 1079 ttm_dma_pool_mm_shrink_init(_manager);
1080 return 0; 1080 return 0;
1081err_manager:
1082 kfree(_manager);
1083 _manager = NULL;
1084err: 1081err:
1085 return ret; 1082 return ret;
1086} 1083}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 82a529e45afe..bf8260133ea9 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -290,8 +290,6 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
290 struct file *swap_storage; 290 struct file *swap_storage;
291 struct page *from_page; 291 struct page *from_page;
292 struct page *to_page; 292 struct page *to_page;
293 void *from_virtual;
294 void *to_virtual;
295 int i; 293 int i;
296 int ret = -ENOMEM; 294 int ret = -ENOMEM;
297 295
@@ -311,11 +309,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
311 goto out_err; 309 goto out_err;
312 310
313 preempt_disable(); 311 preempt_disable();
314 from_virtual = kmap_atomic(from_page); 312 copy_highpage(to_page, from_page);
315 to_virtual = kmap_atomic(to_page);
316 memcpy(to_virtual, from_virtual, PAGE_SIZE);
317 kunmap_atomic(to_virtual);
318 kunmap_atomic(from_virtual);
319 preempt_enable(); 313 preempt_enable();
320 page_cache_release(from_page); 314 page_cache_release(from_page);
321 } 315 }
@@ -336,8 +330,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
336 struct file *swap_storage; 330 struct file *swap_storage;
337 struct page *from_page; 331 struct page *from_page;
338 struct page *to_page; 332 struct page *to_page;
339 void *from_virtual;
340 void *to_virtual;
341 int i; 333 int i;
342 int ret = -ENOMEM; 334 int ret = -ENOMEM;
343 335
@@ -367,11 +359,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
367 goto out_err; 359 goto out_err;
368 } 360 }
369 preempt_disable(); 361 preempt_disable();
370 from_virtual = kmap_atomic(from_page); 362 copy_highpage(to_page, from_page);
371 to_virtual = kmap_atomic(to_page);
372 memcpy(to_virtual, from_virtual, PAGE_SIZE);
373 kunmap_atomic(to_virtual);
374 kunmap_atomic(from_virtual);
375 preempt_enable(); 363 preempt_enable();
376 set_page_dirty(to_page); 364 set_page_dirty(to_page);
377 mark_page_accessed(to_page); 365 mark_page_accessed(to_page);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 627cd85521b1..b3b2cedf6745 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -57,11 +57,8 @@ static int udl_get_modes(struct drm_connector *connector)
57 57
58 edid = (struct edid *)udl_get_edid(udl); 58 edid = (struct edid *)udl_get_edid(udl);
59 59
60 connector->display_info.raw_edid = (char *)edid;
61
62 drm_mode_connector_update_edid_property(connector, edid); 60 drm_mode_connector_update_edid_property(connector, edid);
63 ret = drm_add_edid_modes(connector, edid); 61 ret = drm_add_edid_modes(connector, edid);
64 connector->display_info.raw_edid = NULL;
65 kfree(edid); 62 kfree(edid);
66 return ret; 63 return ret;
67} 64}
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
index 610538308f19..4052c4656498 100644
--- a/drivers/gpu/drm/udl/udl_encoder.c
+++ b/drivers/gpu/drm/udl/udl_encoder.c
@@ -16,7 +16,7 @@
16#include "udl_drv.h" 16#include "udl_drv.h"
17 17
18/* dummy encoder */ 18/* dummy encoder */
19void udl_enc_destroy(struct drm_encoder *encoder) 19static void udl_enc_destroy(struct drm_encoder *encoder)
20{ 20{
21 drm_encoder_cleanup(encoder); 21 drm_encoder_cleanup(encoder);
22 kfree(encoder); 22 kfree(encoder);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 877df059a76f..67df842fbb33 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/fb.h> 15#include <linux/fb.h>
16#include <linux/dma-buf.h>
16 17
17#include <drm/drmP.h> 18#include <drm/drmP.h>
18#include <drm/drm_crtc.h> 19#include <drm/drm_crtc.h>
@@ -355,12 +356,12 @@ static struct fb_ops udlfb_ops = {
355 .fb_release = udl_fb_release, 356 .fb_release = udl_fb_release,
356}; 357};
357 358
358void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 359static void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
359 u16 blue, int regno) 360 u16 blue, int regno)
360{ 361{
361} 362}
362 363
363void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 364static void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
364 u16 *blue, int regno) 365 u16 *blue, int regno)
365{ 366{
366 *red = 0; 367 *red = 0;
@@ -376,16 +377,33 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
376{ 377{
377 struct udl_framebuffer *ufb = to_udl_fb(fb); 378 struct udl_framebuffer *ufb = to_udl_fb(fb);
378 int i; 379 int i;
380 int ret = 0;
379 381
380 if (!ufb->active_16) 382 if (!ufb->active_16)
381 return 0; 383 return 0;
382 384
385 if (ufb->obj->base.import_attach) {
386 ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
387 0, ufb->obj->base.size,
388 DMA_FROM_DEVICE);
389 if (ret)
390 return ret;
391 }
392
383 for (i = 0; i < num_clips; i++) { 393 for (i = 0; i < num_clips; i++) {
384 udl_handle_damage(ufb, clips[i].x1, clips[i].y1, 394 ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
385 clips[i].x2 - clips[i].x1, 395 clips[i].x2 - clips[i].x1,
386 clips[i].y2 - clips[i].y1); 396 clips[i].y2 - clips[i].y1);
397 if (ret)
398 break;
387 } 399 }
388 return 0; 400
401 if (ufb->obj->base.import_attach) {
402 dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
403 0, ufb->obj->base.size,
404 DMA_FROM_DEVICE);
405 }
406 return ret;
389} 407}
390 408
391static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb) 409static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 4acc8c7431cd..afd212c99216 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -181,11 +181,6 @@ int udl_gem_vmap(struct udl_gem_object *obj)
181 int ret; 181 int ret;
182 182
183 if (obj->base.import_attach) { 183 if (obj->base.import_attach) {
184 ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
185 0, obj->base.size, DMA_BIDIRECTIONAL);
186 if (ret)
187 return -EINVAL;
188
189 obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf); 184 obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
190 if (!obj->vmapping) 185 if (!obj->vmapping)
191 return -ENOMEM; 186 return -ENOMEM;
@@ -206,8 +201,6 @@ void udl_gem_vunmap(struct udl_gem_object *obj)
206{ 201{
207 if (obj->base.import_attach) { 202 if (obj->base.import_attach) {
208 dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping); 203 dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
209 dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
210 obj->base.size, DMA_BIDIRECTIONAL);
211 return; 204 return;
212 } 205 }
213 206
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 1f6dbfd62c2a..0ce2d7195256 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -41,11 +41,8 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
41 total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */ 41 total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
42 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE); 42 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
43 if (total_len > 5) { 43 if (total_len > 5) {
44 DRM_INFO("vendor descriptor length:%x data:%02x %02x %02x %02x" \ 44 DRM_INFO("vendor descriptor length:%x data:%*ph\n",
45 "%02x %02x %02x %02x %02x %02x %02x\n", 45 total_len, 11, desc);
46 total_len, desc[0],
47 desc[1], desc[2], desc[3], desc[4], desc[5], desc[6],
48 desc[7], desc[8], desc[9], desc[10]);
49 46
50 if ((desc[0] != total_len) || /* descriptor length */ 47 if ((desc[0] != total_len) || /* descriptor length */
51 (desc[1] != 0x5f) || /* vendor descriptor type */ 48 (desc[1] != 0x5f) || /* vendor descriptor type */
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 52ac2b2d9b73..e96d2349bd54 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -391,7 +391,7 @@ static const struct drm_crtc_funcs udl_crtc_funcs = {
391 .destroy = udl_crtc_destroy, 391 .destroy = udl_crtc_destroy,
392}; 392};
393 393
394int udl_crtc_init(struct drm_device *dev) 394static int udl_crtc_init(struct drm_device *dev)
395{ 395{
396 struct drm_crtc *crtc; 396 struct drm_crtc *crtc;
397 397
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index e96348143a4e..dc095526ffb7 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -126,10 +126,10 @@ static void udl_compress_hline16(
126 126
127 while ((pixel_end > pixel) && 127 while ((pixel_end > pixel) &&
128 (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) { 128 (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
129 uint8_t *raw_pixels_count_byte = 0; 129 uint8_t *raw_pixels_count_byte = NULL;
130 uint8_t *cmd_pixels_count_byte = 0; 130 uint8_t *cmd_pixels_count_byte = NULL;
131 const u8 *raw_pixel_start = 0; 131 const u8 *raw_pixel_start = NULL;
132 const u8 *cmd_pixel_start, *cmd_pixel_end = 0; 132 const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
133 133
134 prefetchw((void *) cmd); /* pull in one cache line at least */ 134 prefetchw((void *) cmd); /* pull in one cache line at least */
135 135
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c84d9ba66f3b..ed3c1e7ddde9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -438,7 +438,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
438 DRM_ERROR("Failed allocating a device private struct.\n"); 438 DRM_ERROR("Failed allocating a device private struct.\n");
439 return -ENOMEM; 439 return -ENOMEM;
440 } 440 }
441 memset(dev_priv, 0, sizeof(*dev_priv));
442 441
443 pci_set_master(dev->pdev); 442 pci_set_master(dev->pdev);
444 443
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index c50724bd30f6..54743943d8b3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -483,7 +483,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
483 } 483 }
484 484
485 /* only need to do this once */ 485 /* only need to do this once */
486 memset(cmd, 0, fifo_size);
487 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN); 486 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
488 cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header)); 487 cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
489 488